Diffstat (limited to 'meta/classes-recipe')
160 files changed, 0 insertions, 19226 deletions
diff --git a/meta/classes-recipe/allarch.bbclass b/meta/classes-recipe/allarch.bbclass
deleted file mode 100644
index e429b92437..0000000000
--- a/meta/classes-recipe/allarch.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-#
-# This class is used for architecture-independent recipes/data files (usually scripts)
-#
-
-python allarch_package_arch_handler () {
-    if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
-            or bb.data.inherits_class("crosssdk", d):
-        return
-
-    variants = d.getVar("MULTILIB_VARIANTS")
-    if not variants:
-        d.setVar("PACKAGE_ARCH", "all")
-}
-
-addhandler allarch_package_arch_handler
-allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
-
-python () {
-    # Allow this class to be included but overridden - only set
-    # the values if we're still "all" package arch.
-    if d.getVar("PACKAGE_ARCH") == "all":
-        # No need for virtual/libc or a cross compiler
-        d.setVar("INHIBIT_DEFAULT_DEPS", "1")
-
-        # Set these to a common set of values; we shouldn't be using them other than for WORKDIR directory
-        # naming anyway
-        d.setVar("baselib", "lib")
-        d.setVar("TARGET_ARCH", "allarch")
-        d.setVar("TARGET_OS", "linux")
-        d.setVar("TARGET_CC_ARCH", "none")
-        d.setVar("TARGET_LD_ARCH", "none")
-        d.setVar("TARGET_AS_ARCH", "none")
-        d.setVar("TARGET_FPU", "")
-        d.setVar("TARGET_PREFIX", "")
-        # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
-        # (this removes any dependencies from the hash perspective)
-        d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
-        d.setVar("SDK_ARCH", "none")
-        d.setVar("SDK_CC_ARCH", "none")
-        d.setVar("TARGET_CPPFLAGS", "none")
-        d.setVar("TARGET_CFLAGS", "none")
-        d.setVar("TARGET_CXXFLAGS", "none")
-        d.setVar("TARGET_LDFLAGS", "none")
-        d.setVar("POPULATESYSROOTDEPS", "")
-
-        # Avoid this being unnecessarily different due to nuances of
-        # the target machine that aren't important for "all" arch
-        # packages.
-        d.setVar("LDFLAGS", "")
-
-        # No need to do shared library processing or debug symbol handling
-        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
-        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
-        d.setVar("INHIBIT_PACKAGE_STRIP", "1")
-
-        # These multilib values shouldn't change allarch packages so exclude them
-        d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
-        d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
-        d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
-
-        d.setVar("qemu_wrapper_cmdline", "def qemu_wrapper_cmdline(data, rootfs_path, library_paths):\n    return 'false'")
-    elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
-        bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
-}
-
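
For context, a minimal recipe using this class looked roughly like the sketch below; the recipe name, script and install path are hypothetical:

    SUMMARY = "Example machine-independent helper script"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
    SRC_URI = "file://example-helper.sh"

    inherit allarch

    do_install() {
        # Install the script unmodified; nothing here depends on the target architecture
        install -D -m 0755 ${UNPACKDIR}/example-helper.sh ${D}${bindir}/example-helper
    }
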
diff --git a/meta/classes-recipe/autotools-brokensep.bbclass b/meta/classes-recipe/autotools-brokensep.bbclass
deleted file mode 100644
index a0fb4b7b50..0000000000
--- a/meta/classes-recipe/autotools-brokensep.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-# Autotools class for recipes where a separate build dir doesn't work.
-# Ideally we should fix the software so it does work; standard autotools
-# supports this.
-inherit autotools
-B = "${S}"
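
A recipe whose upstream build system only works in-tree could simply swap its inherit, e.g. (hypothetical recipe):

    # instead of "inherit autotools", forcing B = S
    inherit autotools-brokensep
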
diff --git a/meta/classes-recipe/autotools.bbclass b/meta/classes-recipe/autotools.bbclass
deleted file mode 100644
index bd477dc60f..0000000000
--- a/meta/classes-recipe/autotools.bbclass
+++ /dev/null
@@ -1,248 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-def get_autotools_dep(d):
-    if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
-        return ''
-
-    pn = d.getVar('PN')
-    deps = ''
-
-    if pn in ['autoconf-native', 'automake-native']:
-        return deps
-    deps += 'autoconf-native automake-native '
-
-    if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
-        deps += 'libtool-native '
-        if not bb.data.inherits_class('native', d) \
-                and not bb.data.inherits_class('nativesdk', d) \
-                and not bb.data.inherits_class('cross', d) \
-                and not d.getVar('INHIBIT_DEFAULT_DEPS'):
-            deps += 'libtool-cross '
-
-    return deps
-
-
-DEPENDS:prepend = "${@get_autotools_dep(d)} "
-
-inherit siteinfo
-
-# Space separated list of shell scripts with variables defined to supply test
-# results for autoconf tests we cannot run at build time.
-# The value of this variable is filled in by a prefunc because it depends on
-# the contents of the sysroot.
-export CONFIG_SITE
-
-EXTRA_AUTORECONF += "--exclude=autopoint"
-
-export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
-
-# When building tools for use at build time, it's recommended for the build
-# system to use these variables when cross-compiling.
-# https://www.gnu.org/software/autoconf-archive/ax_prog_cc_for_build.html
-# https://stackoverflow.com/questions/24201260/autotools-cross-compilation-and-generated-sources/24208587#24208587
-export CPP_FOR_BUILD = "${BUILD_CPP}"
-export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
-
-export CC_FOR_BUILD = "${BUILD_CC}"
-export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
-
-export CXX_FOR_BUILD = "${BUILD_CXX}"
-export CXXFLAGS_FOR_BUILD = "${BUILD_CXXFLAGS}"
-
-export LD_FOR_BUILD = "${BUILD_LD}"
-export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
-
-CONFIGUREOPTS = " --build=${BUILD_SYS} \
-                  --host=${HOST_SYS} \
-                  --target=${TARGET_SYS} \
-                  --prefix=${prefix} \
-                  --exec_prefix=${exec_prefix} \
-                  --bindir=${bindir} \
-                  --sbindir=${sbindir} \
-                  --libexecdir=${libexecdir} \
-                  --datadir=${datadir} \
-                  --sysconfdir=${sysconfdir} \
-                  --sharedstatedir=${sharedstatedir} \
-                  --localstatedir=${localstatedir} \
-                  --libdir=${libdir} \
-                  --includedir=${includedir} \
-                  --oldincludedir=${includedir} \
-                  --infodir=${infodir} \
-                  --mandir=${mandir} \
-                  --disable-silent-rules \
-                  ${CONFIGUREOPT_DEPTRACK}"
-CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
-
-CACHED_CONFIGUREVARS ?= ""
-
-AUTOTOOLS_SCRIPT_PATH ?= "${S}"
-CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
-
-AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
-
-oe_runconf () {
-    # Use relative path to avoid buildpaths in files
-    cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
-    cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
-    if [ -x "$cfgscript" ] ; then
-        bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
-        if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
-            bbnote "The following config.log files may provide further information."
-            bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
-            bbfatal_log "configure failed"
-        fi
-    else
-        bbfatal "no configure script found at $cfgscript"
-    fi
-}
-
-CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
-
-autotools_preconfigure() {
-    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
-        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
-            if [ "${S}" != "${B}" ]; then
-                echo "Previously configured separate build directory detected, cleaning ${B}"
-                rm -rf ${B}
-                mkdir -p ${B}
-            else
-                # At least remove the .la files since automake won't automatically
-                # regenerate them even if CFLAGS/LDFLAGS are different
-                cd ${S}
-                if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
-                    oe_runmake clean
-                fi
-                find ${S} -ignore_readdir_race -name \*.la -delete
-            fi
-        fi
-    fi
-}
-
-autotools_postconfigure(){
-    if [ -n "${CONFIGURESTAMPFILE}" ]; then
-        mkdir -p `dirname ${CONFIGURESTAMPFILE}`
-        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
-    fi
-}
-
-EXTRACONFFUNCS ??= ""
-
-EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
-
-do_configure[prefuncs] += "autotools_preconfigure autotools_sitefiles ${EXTRACONFFUNCS}"
-do_configure[postfuncs] += "autotools_postconfigure"
-
-# Tell autoconf to load the site defaults from siteinfo
-python autotools_sitefiles () {
-    sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
-    d.setVar("CONFIG_SITE", " ".join(sitefiles))
-}
-
-do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
-
-CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in *.m4 Makefile.am"
-
-autotools_do_configure() {
-    # WARNING: gross hack follows:
-    # An autotools built package generally needs these scripts, however only
-    # automake or libtoolize actually install the current versions of them.
-    # This is a problem in builds that do not use libtool or automake, in the case
-    # where we -need- the latest version of these scripts. e.g. running a build
-    # for a package whose autotools are old, on an x86_64 machine, which the old
-    # config.sub does not support. Work around this by installing them manually
-    # regardless.
-
-    PRUNE_M4=""
-
-    for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
-        rm -f `dirname $ac`/configure
-    done
-    if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
-        olddir=`pwd`
-        cd ${AUTOTOOLS_SCRIPT_PATH}
-        # aclocal looks in the native sysroot by default, so tell it to also look in the target sysroot.
-        ACLOCAL="aclocal --aclocal-path=${STAGING_DATADIR}/aclocal/"
-        # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
-        # like it was auto-generated. Work around this by blowing it away
-        # by hand, unless the package specifically asked not to run aclocal.
-        if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
-            bbnote Removing existing aclocal.m4
-            rm -f aclocal.m4
-        fi
-        if [ -e configure.in ]; then
-            CONFIGURE_AC=configure.in
-        else
-            CONFIGURE_AC=configure.ac
-        fi
-        if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
-            if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
-                : do nothing -- we still have an old unmodified configure.ac
-            else
-                bbnote Executing glib-gettextize --force --copy
-                echo "no" | glib-gettextize --force --copy
-            fi
-        elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
-            # Gettextize could be called here, however it doesn't make the job much easier:
-            # It doesn't discover relevant po folders on its own, so they still need to be
-            # found by some heuristics. Also, it would always require the full gettext
-            # package, instead of gettext-minimal-native.
-            cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
-            if [ -d ${S}/po ]; then
-                # Copy the latest Makefile.in.in to the po/ folder, regardless of whether it exists.
-                # If it exists, then also look for identical Makefile.in.in files, and update them too.
-                makefiles_to_update="./po/Makefile.in.in"
-                if [ -f ${S}/po/Makefile.in.in ]; then
-                    # search for all Makefile.in.in files that are identical to ./po/Makefile.in.in, by md5sum
-                    base_makefile_hash=`md5sum ${S}/po/Makefile.in.in | tr -s ' ' | cut -f1 -d' '`
-                    makefiles_to_update=`find ${S} -name Makefile.in.in -exec md5sum {} \+ | grep $base_makefile_hash | tr -s ' ' | cut -d' ' -f2`
-                fi
-                bbnote List of Makefile.in.ins to update: $makefiles_to_update
-                for makefile in ${makefiles_to_update}; do
-                    makefile_dir=$(dirname $makefile)
-                    bbnote Executing: cp ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${makefile_dir}/
-                    cp ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${makefile_dir}/
-                    if [ ! -e ${makefile_dir}/remove-potcdate.sed ]; then
-                        cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sed ${makefile_dir}/
-                    fi
-                done
-                for makevars in `find ${S} -name Makevars`; do
-                    bbnote Concatenating Makevars: $makevars
-                    cat ${STAGING_DATADIR_NATIVE}/gettext/po/Makevars.template.minimal ${makevars} >> ${makevars}.yocto_temp
-                    mv ${makevars}.yocto_temp ${makevars}
-                done
-            fi
-            PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
-        fi
-        mkdir -p m4
-
-        for i in $PRUNE_M4; do
-            find ${S} -ignore_readdir_race -name $i -delete
-        done
-
-        bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF}
-        ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} || die "autoreconf execution failed."
-        cd $olddir
-    fi
-
-    oe_runconf
-}
-
-autotools_do_compile() {
-    oe_runmake
-}
-
-autotools_do_install() {
-    oe_runmake 'DESTDIR=${D}' install
-    # Info dir listing isn't interesting at this point so remove it if it exists.
-    if [ -e "${D}${infodir}/dir" ]; then
-        rm -f ${D}${infodir}/dir
-    fi
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-B = "${WORKDIR}/build"
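
For reference, a typical consumer of this class was a plain autotools recipe along these lines (name and SRC_URI are hypothetical; checksums omitted):

    SUMMARY = "Example autotools-based library"
    LICENSE = "MIT"
    SRC_URI = "https://example.org/releases/libexample-${PV}.tar.gz"

    inherit autotools pkgconfig

    # Passed through to the configure script by oe_runconf
    EXTRA_OECONF += "--disable-examples"
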
diff --git a/meta/classes-recipe/barebox.bbclass b/meta/classes-recipe/barebox.bbclass
deleted file mode 100644
index 73615999aa..0000000000
--- a/meta/classes-recipe/barebox.bbclass
+++ /dev/null
@@ -1,162 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-inherit kernel-arch deploy cml1 pkgconfig
-
-LICENSE ?= "GPL-2.0-only"
-
-PROVIDES += "virtual/bootloader"
-
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-DEPENDS += "bison-native flex-native lz4-native"
-
-S = "${UNPACKDIR}/barebox-${PV}"
-B = "${WORKDIR}/build"
-KBUILD_OUTPUT = "${B}"
-OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
-
-require conf/image-uefi.conf
-
-# For some platforms and configurations, the barebox build process requires
-# additional host tools that can be activated/deactivated here.
-PACKAGECONFIG ??= "openssl libusb fit"
-
-PACKAGECONFIG[openssl] = ",,openssl-native"
-PACKAGECONFIG[libusb] = ",,libusb1-native"
-PACKAGECONFIG[fit] = ",,u-boot-tools-native dtc-native"
-
-export KBUILD_BUILD_USER ?= "oe-user"
-export KBUILD_BUILD_HOST ?= "oe-host"
-
-# Unlike the kernel, barebox may build against host tools like openssl
-export HOST_EXTRACFLAGS
-
-def get_layer_rev(path):
-    try:
-        rev, _ = bb.process.run("git describe --match='' --always --dirty --broken", cwd=path)
-    except bb.process.ExecutionError:
-        rev = ""
-    return rev.strip()
-
-BAREBOX_BUILDSYSTEM_VERSION[doc] = "Build system version to add to the barebox image. By default this is the git description of the containing layer."
-BAREBOX_BUILDSYSTEM_VERSION ??= "${@get_layer_rev(os.path.dirname(d.getVar('FILE')))}"
-
-BAREBOX_FIRMWARE_DIR[doc] = "Overwrite barebox's firmware blob search directory (CONFIG_EXTRA_FIRMWARE_DIR) with this path, default ${B}/firmware"
-BAREBOX_FIRMWARE_DIR ??= "${B}/firmware"
-
-EXTRA_OEMAKE = " \
-    CROSS_COMPILE=${TARGET_PREFIX} -C ${S} O=${B} \
-    BUILDSYSTEM_VERSION=${BAREBOX_BUILDSYSTEM_VERSION} \
-    CONFIG_EXTRA_FIRMWARE_DIR=${BAREBOX_FIRMWARE_DIR} \
-    PKG_CONFIG=pkg-config-native \
-    CROSS_PKG_CONFIG=pkg-config \
-"
-
-BAREBOX_CONFIG[doc] = "The barebox kconfig defconfig file. Not used if a file called defconfig is added to the SRC_URI."
-BAREBOX_CONFIG ?= ""
-
-# set sensible default configs for some of oe-core's QEMU MACHINEs
-BAREBOX_CONFIG:qemuarm = "multi_v7_defconfig"
-BAREBOX_CONFIG:qemuarm64 = "multi_v8_defconfig"
-BAREBOX_CONFIG:qemux86-64 = "efi_defconfig"
-
-# Prevent this recipe from acting as a non-buildable provider
-python () {
-    bareboxconfig = d.getVar('BAREBOX_CONFIG')
-    bareboxdefconfig = 'file://defconfig' in d.getVar('SRC_URI')
-
-    if not bareboxconfig and not bareboxdefconfig:
-        raise bb.parse.SkipRecipe("BAREBOX_CONFIG must be set in the %s machine configuration or file://defconfig must be given in SRC_URI." % d.getVar("MACHINE"))
-}
-
-barebox_do_configure() {
-    if [ -e ${UNPACKDIR}/defconfig ]; then
-        cp ${UNPACKDIR}/defconfig ${B}/.config
-    else
-        if [ -n "${BAREBOX_CONFIG}" ]; then
-            oe_runmake ${BAREBOX_CONFIG}
-        else
-            bbfatal "No defconfig given. Either add file 'file://defconfig' to SRC_URI or set BAREBOX_CONFIG"
-        fi
-    fi
-
-    ${S}/scripts/kconfig/merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
-    cml1_do_configure
-}
-
-BAREBOX_ENV_DIR[doc] = "Overlay the barebox built-in environment with the environment provided by the BSP if specified."
-BAREBOX_ENV_DIR ??= "${UNPACKDIR}/env/"
-
-barebox_do_compile () {
-    export userccflags="${TARGET_LDFLAGS}${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
-    unset LDFLAGS
-    unset CFLAGS
-    unset CPPFLAGS
-    unset CXXFLAGS
-    unset MACHINE
-    # Allow ${UNPACKDIR} to be used in kconfig options to include additionally fetched files
-    export UNPACKDIR=${UNPACKDIR}
-
-    if [ -d ${BAREBOX_ENV_DIR} ]; then
-        BAREBOX_DEFAULT_ENV="$(grep ^CONFIG_DEFAULT_ENVIRONMENT_PATH .config | cut -d '=' -f 2 | tr -d '"')"
-        oe_runmake CONFIG_DEFAULT_ENVIRONMENT_PATH="\"${BAREBOX_DEFAULT_ENV} ${BAREBOX_ENV_DIR}\""
-    else
-        oe_runmake
-    fi
-}
-
-BAREBOX_BINARY[doc] = "Specify the barebox binary to install. If not specified, all barebox artifacts are installed."
-BAREBOX_BINARY ??= "${@'barebox.efi' if d.getVar('EFI_PROVIDER') == 'barebox' else ''}"
-BAREBOX_SUFFIX[doc] = "Specify the suffix for ${BAREBOX_IMAGE}."
-BAREBOX_SUFFIX ??= "img"
-BAREBOX_IMAGE[doc] = "A unique barebox image name. Unused if ${BAREBOX_BINARY} is not set."
-BAREBOX_IMAGE_DEFAULT ?= "${PN}-${MACHINE}-${PV}-${PR}.${BAREBOX_SUFFIX}"
-BAREBOX_IMAGE ?= "${@'${EFI_BOOT_IMAGE}' if d.getVar('EFI_PROVIDER') == 'barebox' else '${BAREBOX_IMAGE_DEFAULT}'}"
-
-BAREBOX_INSTALL_PATH ?= "${@'${EFI_FILES_PATH}' if d.getVar('EFI_PROVIDER') == 'barebox' else '/boot'}"
-
-barebox_do_install () {
-    if [ -n "${BAREBOX_BINARY}" ]; then
-
-        BAREBOX_BIN=${B}/${BAREBOX_BINARY}
-        if [ ! -f "${BAREBOX_BIN}" ]; then
-            BAREBOX_BIN=${B}/images/${BAREBOX_BINARY}
-        fi
-        if [ ! -f "${BAREBOX_BIN}" ]; then
-            bbfatal "Failed to locate ${BAREBOX_BINARY}"
-        fi
-
-        install -D -m 644 ${BAREBOX_BIN} ${D}${BAREBOX_INSTALL_PATH}/${BAREBOX_IMAGE}
-        ln -sf ${BAREBOX_IMAGE} ${D}${BAREBOX_INSTALL_PATH}/${BAREBOX_BINARY}
-    else
-        install -d ${D}${BAREBOX_INSTALL_PATH}/
-        for image in $(cat ${B}/barebox-flash-images); do
-            install -m 644 ${B}/${image} ${D}${BAREBOX_INSTALL_PATH}/
-        done
-    fi
-}
-FILES:${PN} = "${BAREBOX_INSTALL_PATH}"
-
-barebox_do_deploy () {
-    if [ -n "${BAREBOX_BINARY}" ]; then
-
-        BAREBOX_BIN=${B}/${BAREBOX_BINARY}
-        if [ ! -f "${BAREBOX_BIN}" ]; then
-            BAREBOX_BIN=${B}/images/${BAREBOX_BINARY}
-        fi
-
-        install -D -m 644 ${BAREBOX_BIN} ${DEPLOYDIR}/${BAREBOX_IMAGE}
-        ln -sf ${BAREBOX_IMAGE} ${DEPLOYDIR}/${BAREBOX_BINARY}
-    else
-        for image in $(cat ${B}/barebox-flash-images); do
-            cp ${B}/${image} ${DEPLOYDIR}
-        done
-    fi
-}
-addtask deploy after do_compile
-
-EXPORT_FUNCTIONS do_configure do_compile do_install do_deploy
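
Wiring this class into a BSP was typically a machine setting plus a recipe, roughly as follows (machine name, version and fetch details are hypothetical; multi_v7_defconfig is one of the defaults shown above):

    # conf/machine/example-machine.conf
    BAREBOX_CONFIG = "multi_v7_defconfig"

    # recipes-bsp/barebox/barebox_2024.01.0.bb
    SRC_URI = "https://barebox.org/download/barebox-${PV}.tar.bz2"

    inherit barebox
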
diff --git a/meta/classes-recipe/baremetal-image.bbclass b/meta/classes-recipe/baremetal-image.bbclass
deleted file mode 100644
index 4afc171314..0000000000
--- a/meta/classes-recipe/baremetal-image.bbclass
+++ /dev/null
@@ -1,170 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-# Baremetal image class
-#
-# This class is meant to be inherited by recipes for baremetal/RTOS applications
-# It contains code that would be used by all of them, every recipe just needs to
-# override certain variables.
-#
-# For scalability purposes, code within this class focuses on the "image" wiring
-# to satisfy the OpenEmbedded image creation and testing infrastructure.
-#
-# See meta-skeleton for a working example.
-
-
-# Toolchain should be baremetal or newlib/picolibc based.
-# TCLIBC="baremetal" or TCLIBC="newlib" or TCLIBC="picolibc"
-COMPATIBLE_HOST:libc-musl:class-target = "null"
-COMPATIBLE_HOST:libc-glibc:class-target = "null"
-
-
-inherit rootfs-postcommands
-
-# Set some defaults, but these should be overridden by each recipe if required
-IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
-BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
-IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
-IMAGE_NAME_SUFFIX ?= ""
-
-IMAGE_OUTPUT_MANIFEST_DIR = "${WORKDIR}/deploy-image-output-manifest"
-IMAGE_OUTPUT_MANIFEST = "${IMAGE_OUTPUT_MANIFEST_DIR}/manifest.json"
-
-do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
-
-do_image(){
-    install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
-    install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
-}
-
-python do_image_complete(){
-    from pathlib import Path
-    import json
-
-    data = {
-        "taskname": "do_image",
-        "imagetype": "baremetal-image",
-        "images": []
-    }
-
-    img_deploy_dir = Path(d.getVar("IMGDEPLOYDIR"))
-
-    for child in img_deploy_dir.iterdir():
-        if not child.is_file() or child.is_symlink():
-            continue
-
-        data["images"].append({
-            "filename": child.name,
-        })
-
-    with open(d.getVar("IMAGE_OUTPUT_MANIFEST"), "w") as f:
-        json.dump([data], f)
-}
-
-python do_rootfs(){
-    from oe.utils import execute_pre_post_process
-    from pathlib import Path
-
-    # Write empty manifest file to satisfy test infrastructure
-    deploy_dir = d.getVar('IMGDEPLOYDIR')
-    link_name = d.getVar('IMAGE_LINK_NAME')
-    manifest_name = d.getVar('IMAGE_MANIFEST')
-
-    Path(manifest_name).touch()
-    if os.path.exists(manifest_name) and link_name:
-        manifest_link = deploy_dir + "/" + link_name + ".manifest"
-        if manifest_link != manifest_name:
-            if os.path.lexists(manifest_link):
-                os.remove(manifest_link)
-            os.symlink(os.path.basename(manifest_name), manifest_link)
-    # A lot of postprocess commands assume the existence of rootfs/etc
-    sysconfdir = d.getVar("IMAGE_ROOTFS") + d.getVar('sysconfdir')
-    bb.utils.mkdirhier(sysconfdir)
-
-    execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
-    execute_pre_post_process(d, d.getVar("ROOTFS_POSTUNINSTALL_COMMAND"))
-}
-
-
-# Ensure binaries, manifest and qemuboot.conf are populated in DEPLOY_DIR_IMAGE
-do_image_complete[dirs] = "${TOPDIR}"
-SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION:task-image-complete = '1'
-do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
-do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
-do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
-do_image_complete[sstate-plaindirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}"
-do_image_complete[dirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}"
-addtask do_image_complete after do_image before do_build
-
-python do_image_complete_setscene () {
-    sstate_setscene(d)
-}
-addtask do_image_complete_setscene
-
-# QEMU generic Baremetal/RTOS parameters
-QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
-QB_MEM ?= "-m 256"
-QB_DEFAULT_FSTYPE ?= "bin"
-QB_DTB ?= ""
-QB_OPT_APPEND:append = " -nographic"
-
-# QEMU x86 requires an .elf kernel to boot rather than a .bin
-QB_DEFAULT_KERNEL:qemux86 ?= "${IMAGE_LINK_NAME}.elf"
-# QEMU x86-64 refuses to boot from -kernel, needs a multiboot compatible image
-QB_DEFAULT_FSTYPE:qemux86-64 ?= "iso"
-
-# RISC-V tunes set the BIOS; unset it here and instruct QEMU to
-# ignore the BIOS and boot from -kernel
-QB_DEFAULT_BIOS:qemuriscv64 = ""
-QB_DEFAULT_BIOS:qemuriscv32 = ""
-QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
-QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
-
-
-# Use the medium-any code model for the RISC-V 64 bit implementation,
-# since medlow can only access addresses below 0x80000000 and RAM
-# starts at 0x80000000 on RISC-V 64
-# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
-TARGET_CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
-
-
-## Emulate image.bbclass
-# Handle inherits of any of the image classes we need
-IMAGE_CLASSES ??= ""
-IMGCLASSES = " ${IMAGE_CLASSES}"
-inherit_defer ${IMGCLASSES}
-# Set defaults to satisfy IMAGE_FEATURES check
-IMAGE_FEATURES ?= ""
-IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += ""
-
-
-# This next part is necessary to trick the build system into thinking
-# it's building an image recipe so it generates the qemuboot.conf
-addtask do_rootfs before do_image after do_install
-addtask do_image after do_rootfs before do_image_complete
-addtask do_image_complete after do_image before do_build
-inherit qemuboot
-
-# Based on image.bbclass to make sure we build qemu
-python(){
-    # do_addto_recipe_sysroot doesn't exist for all recipes, but we need it to have
-    # /usr/bin on recipe-sysroot (qemu) populated
-    # The do_addto_recipe_sysroot dependency is coming from EXTRA_IMAGEDEPENDS now,
-    # we just need to add the logic to add its dependency to do_image.
-    def extraimage_getdepends(task):
-        deps = ""
-        for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
-            # Make sure we only add it for qemu
-            if 'qemu' in dep:
-                if ":" in dep:
-                    deps += " %s " % (dep)
-                else:
-                    deps += " %s:%s" % (dep, task)
-        return deps
-    d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
-}
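
A consuming application recipe, modeled on the meta-skeleton baremetal-helloworld example mentioned above, would look roughly like this sketch (binary names are hypothetical):

    SUMMARY = "Example baremetal application"
    LICENSE = "MIT"

    inherit baremetal-image

    # do_image above picks the firmware up from ${D}/${base_libdir}/firmware
    do_install() {
        install -d ${D}${base_libdir}/firmware
        install -m 0644 ${B}/hello.bin ${D}${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin
        install -m 0644 ${B}/hello.elf ${D}${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf
    }
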
diff --git a/meta/classes-recipe/bash-completion.bbclass b/meta/classes-recipe/bash-completion.bbclass
deleted file mode 100644
index b656e76c09..0000000000
--- a/meta/classes-recipe/bash-completion.bbclass
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-DEPENDS:append:class-target = " bash-completion"
-
-PACKAGES += "${PN}-bash-completion"
-
-FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
-
-RDEPENDS:${PN}-bash-completion = "bash-completion"
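
Usage was a single line in any recipe that ships completion files; the class then packages them into a separate ${PN}-bash-completion package automatically:

    inherit bash-completion
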
diff --git a/meta/classes-recipe/bin_package.bbclass b/meta/classes-recipe/bin_package.bbclass
deleted file mode 100644
index 3a1befc29c..0000000000
--- a/meta/classes-recipe/bin_package.bbclass
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-# Common variables and tasks for binary package recipes.
-# Basic principle:
-# * The files have been unpacked to ${S} by base.bbclass
-# * Skip do_configure and do_compile
-# * Use do_install to install the files to ${D}
-#
-# Note:
-# The "subdir" parameter in the SRC_URI is useful when the input package
-# is rpm, ipk, deb and so on, for example:
-#
-# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
-#
-# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
-# they would be in ${WORKDIR}.
-#
-
-# Skip the unwanted steps
-do_configure[noexec] = "1"
-do_compile[noexec] = "1"
-
-# Install the files to ${D}
-bin_package_do_install () {
-    # Do it carefully
-    [ -d "${S}" ] || exit 1
-    if [ -z "$(ls -A ${S})" ]; then
-        bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
-    fi
-    cd ${S}
-    install -d ${D}${base_prefix}
-    tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
-        | tar --no-same-owner -xpf - -C ${D}${base_prefix}
-}
-
-FILES:${PN} = "/"
-
-EXPORT_FUNCTIONS do_install
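
A consuming recipe for a prebuilt archive might be sketched as follows (URI is hypothetical; checksum and license details omitted):

    SUMMARY = "Prebuilt vendor tool"
    LICENSE = "CLOSED"
    # subdir= keeps the payload out of the top-level unpack directory, as noted above
    SRC_URI = "https://example.com/vendor-tool-${PV}.tar.gz;subdir=${BPN}-${PV}"

    inherit bin_package
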
diff --git a/meta/classes-recipe/binconfig-disabled.bbclass b/meta/classes-recipe/binconfig-disabled.bbclass
deleted file mode 100644
index cbe2078e0f..0000000000
--- a/meta/classes-recipe/binconfig-disabled.bbclass
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-#
-# Class to disable binconfig files instead of installing them
-#
-
-# The list of scripts which should be disabled.
-BINCONFIG ?= ""
-
-FILES:${PN}-dev += "${bindir}/*-config"
-
-do_install:append () {
-    for x in ${BINCONFIG}; do
-        # Make the disabled script emit invalid parameters for those configure
-        # scripts which call it without checking the return code.
-        echo "#!/bin/sh" > ${D}$x
-        echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
-        echo "echo '--should-not-have-used-$x'" >> ${D}$x
-        echo "exit 1" >> ${D}$x
-        chmod +x ${D}$x
-    done
-}
-
-SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
-
-binconfig_disabled_sysroot_preprocess () {
-    for x in ${BINCONFIG}; do
-        configname=`basename $x`
-        install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
-        install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
-    done
-}
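
Typical usage listed the scripts to neutralize, e.g. (hypothetical script names):

    inherit binconfig-disabled

    BINCONFIG = "${bindir}/example-config ${bindir}/libexample-config"
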
diff --git a/meta/classes-recipe/binconfig.bbclass b/meta/classes-recipe/binconfig.bbclass
deleted file mode 100644
index 427dba7f1f..0000000000
--- a/meta/classes-recipe/binconfig.bbclass
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-FILES:${PN}-dev += "${bindir}/*-config"
-
-# The namespaces can clash here, hence the two-step replace
-def get_binconfig_mangle(d):
-    s = "-e ''"
-    if not bb.data.inherits_class('native', d):
-        optional_quote = r"\(\"\?\)"
-        s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
-        s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
-        s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
-        s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
-        s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
-        s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
-        s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
-        s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
-        s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
-        s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
-        s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
-        s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
-        s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
-        s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
-        s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
-        s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
-    if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
-        s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
-
-    return s
-
-BINCONFIG_GLOB ?= "*-config"
-
-PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
-
-binconfig_package_preprocess () {
-    for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
-        sed -i \
-            -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
-            -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
-            -e 's:${STAGING_INCDIR}:${includedir}:g;' \
-            -e 's:${STAGING_DATADIR}:${datadir}:' \
-            -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
-            $config
-    done
-}
-
-SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
-
-binconfig_sysroot_preprocess () {
-    for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
-        configname=`basename $config`
-        install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
-        sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
-        chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
-    done
-}
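
A recipe shipping a legacy *-config script only needed the inherit; a non-default glob could be set when the script name did not match the default pattern (sketch, hypothetical name):

    inherit binconfig

    BINCONFIG_GLOB = "example-conf"
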
diff --git a/meta/classes-recipe/cargo-update-recipe-crates.bbclass b/meta/classes-recipe/cargo-update-recipe-crates.bbclass
deleted file mode 100644
index 3251d5ef2e..0000000000
--- a/meta/classes-recipe/cargo-update-recipe-crates.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-##
-## Purpose:
-## This class is used to update the list of crates in SRC_URI
-## by reading Cargo.lock in the source tree.
-##
-## See meta/recipes-devtools/python/python3-bcrypt_*.bb for an example
-##
-## To perform the update: bitbake -c update_crates recipe-name
-
-addtask do_update_crates after do_patch
-do_update_crates[depends] = "python3-native:do_populate_sysroot"
-do_update_crates[nostamp] = "1"
-do_update_crates[doc] = "Update the recipe by reading Cargo.lock and writing ${THISDIR}/${BPN}-crates.inc"
-
-RECIPE_UPGRADE_EXTRA_TASKS += "do_update_crates"
-
-# The directory in which to search for Cargo.lock files
-CARGO_LOCK_SRC_DIR ??= "${S}"
-
-do_update_crates() {
-    TARGET_FILE="${THISDIR}/${BPN}-crates.inc"
-
-    nativepython3 - <<EOF
-
-def get_crates(f):
-    import tomllib
-    c_list = '# from %s' % os.path.relpath(f, '${CARGO_LOCK_SRC_DIR}')
-    c_list += '\nSRC_URI += " \\\\'
-    crates = tomllib.load(open(f, 'rb'))
-
-    # Build a list with crates info that have crates.io in the source
-    crates_candidates = list(filter(lambda c: 'crates.io' in c.get('source', ''), crates['package']))
-
-    if not crates_candidates:
-        raise ValueError("Unable to find any candidate crates that use crates.io")
-
-    # Update crates uri and their checksum; to avoid name clashing on the checksum
-    # we need to rename crates with name and version to have a unique key
-    cksum_list = ''
-    for c in crates_candidates:
-        rename = "%s-%s" % (c['name'], c['version'])
-        c_list += '\n    crate://crates.io/%s/%s \\\\' % (c['name'], c['version'])
-        if 'checksum' in c:
-            cksum_list += '\nSRC_URI[%s.sha256sum] = "%s"' % (rename, c['checksum'])
-
-    c_list += '\n"\n'
-    c_list += cksum_list
-    c_list += '\n'
-    return c_list
-
-import os
-crates = "# Autogenerated with 'bitbake -c update_crates ${PN}'\n\n"
-found = False
-for root, dirs, files in os.walk('${CARGO_LOCK_SRC_DIR}'):
-    # ignore git and patches directories
-    if root.startswith(os.path.join('${CARGO_LOCK_SRC_DIR}', '.pc')):
-        continue
-    if root.startswith(os.path.join('${CARGO_LOCK_SRC_DIR}', '.git')):
-        continue
-    for file in files:
-        if file == 'Cargo.lock':
-            try:
-                cargo_lock_path = os.path.join(root, file)
-                crates += get_crates(os.path.join(root, file))
-            except Exception as e:
-                raise ValueError("Cannot parse '%s'" % cargo_lock_path) from e
-            else:
-                found = True
-if not found:
-    raise ValueError("Unable to find any Cargo.lock in ${CARGO_LOCK_SRC_DIR}")
-open("${TARGET_FILE}", 'w').write(crates)
-EOF
-
-    bbnote "Successfully updated crates inside '${TARGET_FILE}'"
-}
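
In practice a recipe included the generated file, and developers refreshed it on version upgrades, along these lines (sketch based on the python3-bcrypt example referenced above):

    # in the recipe
    inherit cargo-update-recipe-crates
    require ${BPN}-crates.inc

    # on the command line, after bumping PV
    bitbake -c update_crates python3-bcrypt
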
diff --git a/meta/classes-recipe/cargo.bbclass b/meta/classes-recipe/cargo.bbclass
deleted file mode 100644
index 2dd28e95d3..0000000000
--- a/meta/classes-recipe/cargo.bbclass
+++ /dev/null
@@ -1,95 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-##
-## Purpose:
-## This class is used by any recipes that are built using
-## Cargo.
-
-inherit cargo_common
-inherit rust-target-config
-
-# the binary we will use
-CARGO = "cargo"
-
-# We need cargo to compile for the target
-BASEDEPENDS:append = " cargo-native"
-
-# Ensure we get the right rust variant
-DEPENDS:append:class-target = " rust-native ${RUSTLIB_DEP}"
-DEPENDS:append:class-nativesdk = " rust-native ${RUSTLIB_DEP}"
-DEPENDS:append:class-native = " rust-native"
-
-# Enable build separation
-B = "${WORKDIR}/build"
-
-# In case something fails in the build process, give a bit more feedback on
-# where the issue occurred
-export RUST_BACKTRACE = "1"
-
-RUSTFLAGS ??= ""
-BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
-# The --frozen flag prevents network access (which is required since only
-# the do_fetch step is authorized to access the network)
-# and requires an up-to-date Cargo.lock file.
-# This forces the package being built to already ship a Cargo.lock; in the end
-# this is what we want, at least for reproducibility of the build.
-CARGO_BUILD_FLAGS = "-v --frozen --target ${RUST_HOST_SYS} ${BUILD_MODE} --manifest-path=${CARGO_MANIFEST_PATH}"
-
-# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
-# change if CARGO_BUILD_FLAGS changes.
-BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
-CARGO_TARGET_SUBDIR = "${RUST_HOST_SYS}/${BUILD_DIR}"
-oe_cargo_build () {
-    export RUSTFLAGS="${RUSTFLAGS}"
-    bbnote "Using rust targets from ${RUST_TARGET_PATH}"
-    bbnote "cargo = $(which ${CARGO})"
-    bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} ${PACKAGECONFIG_CONFARGS} $@"
-    "${CARGO}" build ${CARGO_BUILD_FLAGS} ${PACKAGECONFIG_CONFARGS} "$@"
-}
-
-do_compile[progress] = "outof:\s+(\d+)/(\d+)"
-cargo_do_compile () {
-    oe_cargo_build
-}
-
-cargo_do_install () {
-    local have_installed=false
-    for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
-        case $tgt in
-        *.so|*.rlib)
-            if [ -n "${CARGO_INSTALL_LIBRARIES}" ]; then
-                install -d "${D}${rustlibdir}"
-                install -m755 "$tgt" "${D}${rustlibdir}"
-                have_installed=true
-            fi
-            ;;
-        *examples)
-            if [ -d "$tgt" ]; then
-                for example in "$tgt/"*; do
-                    if [ -f "$example" ] && [ -x "$example" ]; then
-                        install -d "${D}${bindir}"
-                        install -m755 "$example" "${D}${bindir}"
-                        have_installed=true
-                    fi
-                done
-            fi
-            ;;
-        *)
-            if [ -f "$tgt" ] && [ -x "$tgt" ]; then
-                install -d "${D}${bindir}"
-                install -m755 "$tgt" "${D}${bindir}"
-                have_installed=true
-            fi
-            ;;
-        esac
-    done
-    if ! $have_installed; then
-        die "Did not find anything to install"
-    fi
-}
-
-EXPORT_FUNCTIONS do_compile do_install
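
A straightforward consumer fetched its crate sources and inherited the class, e.g. (hypothetical crate name; the per-crate checksum lines that crate:// URIs require are omitted):

    SUMMARY = "Example Rust command-line tool"
    LICENSE = "MIT"
    SRC_URI = "crate://crates.io/example-tool/${PV}"

    inherit cargo
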
diff --git a/meta/classes-recipe/cargo_c.bbclass b/meta/classes-recipe/cargo_c.bbclass
deleted file mode 100644
index ef431634a2..0000000000
--- a/meta/classes-recipe/cargo_c.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-##
-## Purpose:
-## This class is used by any recipes that want to compile a C ABI compatible
-## library with a header and a pkg-config file
-
-inherit cargo pkgconfig
-
-# the binaries we will use
-CARGO_C_BUILD = "cargo-cbuild"
-CARGO_C_INSTALL = "cargo-cinstall"
-
-# We need cargo-c to compile for the target
-BASEDEPENDS:append = " cargo-c-native"
-
-do_compile[progress] = "outof:\s+(\d+)/(\d+)"
-cargo_c_do_compile() {
-    oe_cargo_fix_env
-    export RUSTFLAGS="${RUSTFLAGS}"
-    bbnote "Using rust targets from ${RUST_TARGET_PATH}"
-    bbnote "cargo-cbuild = $(which ${CARGO_C_BUILD})"
-    bbnote "${CARGO_C_BUILD} cbuild ${CARGO_BUILD_FLAGS}"
-    "${CARGO_C_BUILD}" cbuild ${CARGO_BUILD_FLAGS}
-}
-
-cargo_c_do_install() {
-    oe_cargo_fix_env
-    export RUSTFLAGS="${RUSTFLAGS}"
-    bbnote "cargo-cinstall = $(which ${CARGO_C_INSTALL})"
-    "${CARGO_C_INSTALL}" cinstall ${CARGO_BUILD_FLAGS} \
-        --destdir ${D} \
-        --prefix ${prefix} \
-        --library-type cdylib
-}
-
-EXPORT_FUNCTIONS do_compile do_install
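
A recipe exposing a C ABI from a Rust crate needed little more than the inherit; the cdylib, header and pkg-config file are then installed by cargo-cinstall as shown above (sketch; packaging details such as unversioned .so handling vary per recipe):

    inherit cargo_c
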
diff --git a/meta/classes-recipe/cargo_common.bbclass b/meta/classes-recipe/cargo_common.bbclass
deleted file mode 100644
index bc44ad7918..0000000000
--- a/meta/classes-recipe/cargo_common.bbclass
+++ /dev/null
@@ -1,242 +0,0 @@
-#
-# Copyright OpenEmbedded Contributors
-#
-# SPDX-License-Identifier: MIT
-#
-
-##
-## Purpose:
-## This class is to support building with cargo. It
-## must be different from cargo.bbclass because Rust
-## now builds with Cargo but cannot use cargo.bbclass
-## due to dependencies and assumptions in cargo.bbclass
-## that Rust & Cargo are already installed. So this
-## is used by cargo.bbclass and the Rust recipes.
-##
-
-# add crate fetch support
-inherit rust-common
-
-# Where we download our registry and dependencies to
-export CARGO_HOME = "${UNPACKDIR}/cargo_home"
-
-# The pkg-config-rs library used by cargo build scripts disables itself when
-# cross compiling unless this is defined. We set up pkg-config appropriately
-# for cross compilation, so tell it we know better than it.
-export PKG_CONFIG_ALLOW_CROSS = "1"
-
-# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
-# for example the rust compiler itself, come with their own vendored sources.
-# Specifying two [source.crates-io] will not work.
-CARGO_DISABLE_BITBAKE_VENDORING ??= "0"
-
-# Used by libstd-rs to point to the vendor dir included in rustc src
-CARGO_VENDORING_DIRECTORY ??= "${CARGO_HOME}/bitbake"
-
-# The directory of the Cargo.toml relative to the root directory; by default
-# assume there's a Cargo.toml directly in the root directory
-CARGO_SRC_DIR ??= ""
-
-# The actual path to the Cargo.toml
-CARGO_MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
-
-# Path to Cargo.lock
-CARGO_LOCK_PATH ??= "${@ os.path.join(os.path.dirname(d.getVar('CARGO_MANIFEST_PATH')), 'Cargo.lock')}"
-
-CARGO_RUST_TARGET_CCLD ??= "${RUST_TARGET_CCLD}"
-cargo_common_do_configure () {
-	mkdir -p ${CARGO_HOME}/bitbake
-
-	cat <<- EOF > ${CARGO_HOME}/config.toml
-	# EXTRA_OECARGO_PATHS
-	paths = [
-	$(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
-	]
-	EOF
-
-	cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-	# Local mirror vendored by bitbake
-	[source.bitbake]
-	directory = "${CARGO_VENDORING_DIRECTORY}"
-	EOF
-
-	if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
-		cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-		[source.crates-io]
-		replace-with = "bitbake"
-		local-registry = "/nonexistent"
-		EOF
-	fi
-
-	cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-	[http]
-	# Multiplexing can't be enabled because http2 can't be enabled
-	# in curl-native without dependency loops
-	multiplexing = false
-
-	# Ignore the hard coded and incorrect path to certificates
-	cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
-
-	EOF
-
-	cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-	# HOST_SYS
-	[target.${RUST_HOST_SYS}]
-	linker = "${CARGO_RUST_TARGET_CCLD}"
-	EOF
-
-	if [ "${RUST_HOST_SYS}" != "${RUST_BUILD_SYS}" ]; then
-		cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-		# BUILD_SYS
-		[target.${RUST_BUILD_SYS}]
-		linker = "${RUST_BUILD_CCLD}"
-		EOF
-	fi
-
-	if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then
-		cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-		# TARGET_SYS
-		[target.${RUST_TARGET_SYS}]
-		linker = "${RUST_TARGET_CCLD}"
-		EOF
-	fi
-
-	# Put build output in the build directory preferred by bitbake instead of
-	# inside the source directory, unless they are the same
-	if [ "${B}" != "${S}" ]; then
-		# We should consider mandating out-of-tree builds and just using [cleandirs]
-		rm -rf ${B}/target
-		mkdir -p ${B}
-
-		cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-		[build]
-		# Use out of tree build destination to avoid polluting the source tree
-		target-dir = "${B}/target"
-		EOF
-	fi
-
-	cat <<- EOF >> ${CARGO_HOME}/config.toml
-
-	[term]
-	progress.when = 'always'
-	progress.width = 80
-	EOF
-}
-
-python cargo_common_do_patch_paths() {
-    import shutil
-
-    cargo_config = os.path.join(d.getVar("CARGO_HOME"), "config.toml")
-    if not os.path.exists(cargo_config):
-        return
-
-    src_uri = (d.getVar('SRC_URI') or "").split()
-    if len(src_uri) == 0:
-        return
-
-    patches = dict()
-    workdir = d.getVar('UNPACKDIR')
-    fetcher = bb.fetch2.Fetch(src_uri, d)
-    for url in fetcher.urls:
-        ud = fetcher.ud[url]
-        if ud.type == 'git' or ud.type == 'gitsm':
-            name = ud.parm.get('name')
-            destsuffix = ud.parm.get('destsuffix')
-            if name is not None and destsuffix is not None:
-                if ud.user:
-                    repo = '%s://%s@%s%s' % (ud.proto, ud.user, ud.host, ud.path)
-                else:
-                    repo = '%s://%s%s' % (ud.proto, ud.host, ud.path)
-                path = '%s = { path = "%s" }' % (name, os.path.join(workdir, destsuffix))
-                patches.setdefault(repo, []).append(path)
-
-    with open(cargo_config, "a+") as config:
-        for k, v in patches.items():
-            print('\n[patch."%s"]' % k, file=config)
-            for name in v:
-                print(name, file=config)
-
-    if not patches:
-        return
-
-    # The Cargo.lock file is needed to be sure that artifacts
-    # downloaded by the fetch steps are those expected by the
-    # project and that any patches are correctly applied.
-    # Moreover, since we do not want any modification
-    # of this file (for reproducibility purposes), we prevent that by
-    # using the --frozen flag (in CARGO_BUILD_FLAGS); raising a clear error
-    # here is better than letting cargo report (in case the file is missing)
-    # "Cargo.lock should be modified but --frozen was given"
-
-    lockfile = d.getVar("CARGO_LOCK_PATH")
-    if not os.path.exists(lockfile):
-        bb.fatal(f"{lockfile} file doesn't exist")
-
-    # There are patched files and so Cargo.lock should be modified, but we use
-    # --frozen, so let's handle the modifications here.
-    #
-    # Note that a "better" (more elegant?) approach would have been to use
-    # cargo update for patched packages:
-    #   cargo update --offline -p package_1 -p package_2
-    # But this is not possible since it requires cargo's local git db
-    # to be populated, and this is not the case as we fetch the git repos ourselves.
-
-    lockfile_orig = lockfile + ".orig"
-    if not os.path.exists(lockfile_orig):
-        shutil.copy(lockfile, lockfile_orig)
-
-    newlines = []
-    with open(lockfile_orig, "r") as f:
-        for line in f.readlines():
-            if not line.startswith("source = \"git"):
-                newlines.append(line)
-
-    with open(lockfile, "w") as f:
-        f.writelines(newlines)
-}
-do_configure[postfuncs] += "cargo_common_do_patch_paths"
-
-do_compile:prepend () {
-    oe_cargo_fix_env
-}
-
-oe_cargo_fix_env () {
-    export CC="${RUST_TARGET_CC}"
-    export CXX="${RUST_TARGET_CXX}"
-    export CFLAGS="${CFLAGS}"
-    export CXXFLAGS="${CXXFLAGS}"
-    export AR="${AR}"
-    export TARGET_CC="${RUST_TARGET_CC}"
-    export TARGET_CXX="${RUST_TARGET_CXX}"
-    export TARGET_CFLAGS="${CFLAGS}"
-    export TARGET_CXXFLAGS="${CXXFLAGS}"
-    export TARGET_AR="${AR}"
-    export HOST_CC="${RUST_BUILD_CC}"
-    export HOST_CXX="${RUST_BUILD_CXX}"
-    export HOST_CFLAGS="${BUILD_CFLAGS}"
-    export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
-    export HOST_AR="${BUILD_AR}"
-}
-
-EXTRA_OECARGO_PATHS ??= ""
-
-EXPORT_FUNCTIONS do_configure
-
-# The culprit for this setting is the libc crate,
-# which as of Jun 2023 calls directly into 32 bit time functions in glibc,
-# bypassing all of glibc's provisions to choose the right Y2038-safe functions. As
-# rust components statically link with that crate, pretty much everything
-# is affected, and so there's no point trying to have recipe-specific
-# INSANE_SKIP entries.
-#
-# Upstream ticket and PR:
-# https://github.com/rust-lang/libc/issues/3223
-# https://github.com/rust-lang/libc/pull/3175
-INSANE_SKIP:append = " 32bit-time"
diff --git a/meta/classes-recipe/cmake-qemu.bbclass b/meta/classes-recipe/cmake-qemu.bbclass deleted file mode 100644 index 383fc74bf2..0000000000 --- a/meta/classes-recipe/cmake-qemu.bbclass +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | # Not all platforms are supported by Qemu. Using qemu-user therefore | ||
| 7 | # involves a certain risk, which is also the reason why this feature | ||
| 8 | # is not part of the main cmake class by default. | ||
| 9 | # | ||
| 10 | # One use case is the execution of cross-compiled unit tests with CTest | ||
| 11 | # on the build machine. If CMAKE_EXEWRAPPER_ENABLED is configured, | ||
| 12 | # cmake --build --target test | ||
| 13 | # works transparently with qemu-user. If the cmake project is developed | ||
| 14 | # with this use case in mind, this also works nicely from an IDE | ||
| 15 | # configured to use cmake-native for cross-compiling. | ||
| 16 | |||
| 17 | inherit qemu cmake | ||
| 18 | |||
| 19 | DEPENDS:append:class-target = "${@' qemu-native' if bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', True, False, d) else ''}" | ||
| 20 | |||
| 21 | cmake_do_generate_toolchain_file:append:class-target() { | ||
| 22 | if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then | ||
| 23 | # Write out a qemu wrapper that will be used as exe_wrapper so that cmake | ||
| 24 | # can run target helper binaries through that. This also allows to execute ctest. | ||
| 25 | qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_HOST}', ['${STAGING_DIR_HOST}/${libdir}','${STAGING_DIR_HOST}/${base_libdir}'])}" | ||
| 26 | echo "#!/bin/sh" > "${WORKDIR}/cmake-qemuwrapper" | ||
| 27 | echo "$qemu_binary \"\$@\"" >> "${WORKDIR}/cmake-qemuwrapper" | ||
| 28 | chmod +x "${WORKDIR}/cmake-qemuwrapper" | ||
| 29 | echo "set( CMAKE_CROSSCOMPILING_EMULATOR ${WORKDIR}/cmake-qemuwrapper)" \ | ||
| 30 | >> ${WORKDIR}/toolchain.cmake | ||
| 31 | fi | ||
| 32 | } | ||
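A minimal usage sketch, assuming the machine's MACHINE_FEATURES includes qemu-usermode (i.e. the target architecture is supported by qemu-user); the recipe itself is hypothetical:

    # hypothetical recipe: cross-compiled CTest binaries then run on the
    # build host through the generated cmake-qemuwrapper
    inherit cmake-qemu

With the wrapper registered as CMAKE_CROSSCOMPILING_EMULATOR, the cross-compiled tests run transparently on the build host.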
diff --git a/meta/classes-recipe/cmake.bbclass b/meta/classes-recipe/cmake.bbclass deleted file mode 100644 index 4f59966521..0000000000 --- a/meta/classes-recipe/cmake.bbclass +++ /dev/null | |||
| @@ -1,321 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Path to the CMake file to process. | ||
| 8 | OECMAKE_SOURCEPATH ??= "${S}" | ||
| 9 | |||
| 10 | DEPENDS:prepend = "cmake-native " | ||
| 11 | B = "${WORKDIR}/build" | ||
| 12 | |||
| 13 | # What CMake generator to use. | ||
| 14 | # The supported options are "Unix Makefiles" or "Ninja". | ||
| 15 | OECMAKE_GENERATOR ?= "Ninja" | ||
| 16 | |||
| 17 | python() { | ||
| 18 | generator = d.getVar("OECMAKE_GENERATOR") | ||
| 19 | if "Unix Makefiles" in generator: | ||
| 20 | args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE") | ||
| 21 | d.setVar("OECMAKE_GENERATOR_ARGS", args) | ||
| 22 | d.setVarFlag("do_compile", "progress", "percent") | ||
| 23 | elif "Ninja" in generator: | ||
| 24 | args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja" | ||
| 25 | d.appendVar("DEPENDS", " ninja-native") | ||
| 26 | d.setVar("OECMAKE_GENERATOR_ARGS", args) | ||
| 27 | d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+") | ||
| 28 | else: | ||
| 29 | bb.fatal("Unknown CMake Generator %s" % generator) | ||
| 30 | } | ||
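For projects that do not build with ninja, the generator can be switched per recipe; a minimal sketch:

    # hypothetical recipe fragment: fall back to make instead of ninja
    OECMAKE_GENERATOR = "Unix Makefiles"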
| 31 | OECMAKE_AR ?= "${AR}" | ||
| 32 | |||
| 33 | # Compiler flags | ||
| 34 | OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}" | ||
| 35 | OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}" | ||
| 36 | OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG" | ||
| 37 | OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG" | ||
| 38 | OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}" | ||
| 39 | OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}" | ||
| 40 | |||
| 41 | def oecmake_map_compiler(compiler, d): | ||
| 42 | args = d.getVar(compiler).split() | ||
| 43 | if args[0] == "ccache": | ||
| 44 | return args[1], args[0] | ||
| 45 | return args[0], "" | ||
| 46 | |||
| 47 | # C/C++ Compiler (without cpu arch/tune arguments) | ||
| 48 | OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}" | ||
| 49 | OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}" | ||
| 50 | OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}" | ||
| 51 | OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}" | ||
| 52 | |||
| 53 | # Native C/C++ Compiler (without cpu arch/tune arguments) | ||
| 54 | OECMAKE_NATIVE_C_COMPILER ?= "${@oecmake_map_compiler('BUILD_CC', d)[0]}" | ||
| 55 | OECMAKE_NATIVE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('BUILD_CC', d)[1]}" | ||
| 56 | OECMAKE_NATIVE_CXX_COMPILER ?= "${@oecmake_map_compiler('BUILD_CXX', d)[0]}" | ||
| 57 | OECMAKE_NATIVE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('BUILD_CXX', d)[1]}" | ||
| 58 | OECMAKE_NATIVE_AR ?= "${BUILD_AR}" | ||
| 59 | OECMAKE_NATIVE_RANLIB ?= "${@d.getVar('BUILD_RANLIB').split()[0]}" | ||
| 60 | OECMAKE_NATIVE_NM ?= "${BUILD_NM}" | ||
| 61 | |||
| 62 | # Native compiler flags | ||
| 63 | OECMAKE_NATIVE_C_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CFLAGS}" | ||
| 64 | OECMAKE_NATIVE_CXX_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CXXFLAGS}" | ||
| 65 | OECMAKE_NATIVE_C_FLAGS_RELEASE ?= "-DNDEBUG" | ||
| 66 | OECMAKE_NATIVE_CXX_FLAGS_RELEASE ?= "-DNDEBUG" | ||
| 67 | OECMAKE_NATIVE_C_LINK_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CPPFLAGS} ${BUILD_LDFLAGS}" | ||
| 68 | OECMAKE_NATIVE_CXX_LINK_FLAGS ?= "${BUILD_CC_ARCH} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}" | ||
| 69 | BUILD_CXXFLAGS += "${BUILD_CC_ARCH}" | ||
| 70 | BUILD_CFLAGS += "${BUILD_CC_ARCH}" | ||
| 71 | |||
| 72 | # clear compiler vars for allarch to avoid sig hash difference | ||
| 73 | OECMAKE_C_COMPILER:allarch = "" | ||
| 74 | OECMAKE_C_COMPILER_LAUNCHER:allarch = "" | ||
| 75 | OECMAKE_CXX_COMPILER:allarch = "" | ||
| 76 | OECMAKE_CXX_COMPILER_LAUNCHER:allarch = "" | ||
| 77 | OECMAKE_NATIVE_C_COMPILER:allarch = "" | ||
| 78 | OECMAKE_NATIVE_C_COMPILER_LAUNCHER:allarch = "" | ||
| 79 | OECMAKE_NATIVE_CXX_COMPILER:allarch = "" | ||
| 80 | OECMAKE_NATIVE_CXX_COMPILER_LAUNCHER:allarch = "" | ||
| 81 | |||
| 82 | OECMAKE_RPATH ?= "" | ||
| 83 | OECMAKE_PERLNATIVE_DIR ??= "" | ||
| 84 | OECMAKE_EXTRA_ROOT_PATH ?= "" | ||
| 85 | |||
| 86 | OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY" | ||
| 87 | |||
| 88 | EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}" | ||
| 89 | |||
| 90 | export CMAKE_BUILD_PARALLEL_LEVEL | ||
| 91 | CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}" | ||
| 92 | CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}" | ||
| 93 | CMAKE_BUILD_PARALLEL_LEVEL:task-compile-ptest-base = "${@oe.utils.parallel_make(d, False)}" | ||
| 94 | CMAKE_BUILD_PARALLEL_LEVEL:task-install-ptest-base = "${@oe.utils.parallel_make(d, True)}" | ||
| 95 | |||
| 96 | OECMAKE_TARGET_COMPILE ?= "all" | ||
| 97 | OECMAKE_TARGET_INSTALL ?= "install" | ||
| 98 | |||
| 99 | def map_host_os_to_system_name(host_os): | ||
| 100 | if host_os.startswith('darwin'): | ||
| 101 | return 'Darwin' | ||
| 102 | if host_os.startswith('mingw'): | ||
| 103 | return 'Windows' | ||
| 104 | if host_os.startswith('linux'): | ||
| 105 | return 'Linux' | ||
| 106 | return host_os | ||
| 107 | |||
| 108 | # CMake expects target architectures in the format of uname(2), | ||
| 109 | # which do not always match TARGET_ARCH, so all the necessary | ||
| 110 | # conversions should happen here. | ||
| 111 | def map_host_arch_to_uname_arch(host_arch): | ||
| 112 | if host_arch == "powerpc": | ||
| 113 | return "ppc" | ||
| 114 | if host_arch == "powerpc64le": | ||
| 115 | return "ppc64le" | ||
| 116 | if host_arch == "powerpc64": | ||
| 117 | return "ppc64" | ||
| 118 | return host_arch | ||
| 119 | |||
| 120 | |||
| 121 | cmake_do_generate_toolchain_file() { | ||
| 122 | if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then | ||
| 123 | cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )" | ||
| 124 | else | ||
| 125 | cmake_crosscompiling="set( CMAKE_CROSSCOMPILING TRUE )" | ||
| 126 | cmake_sysroot="set( CMAKE_SYSROOT \"${RECIPE_SYSROOT}\" )" | ||
| 127 | fi | ||
| 128 | |||
| 129 | cat > ${WORKDIR}/toolchain.cmake <<EOF | ||
| 130 | # CMake system name must be something like "Linux". | ||
| 131 | # This is important for cross-compiling. | ||
| 132 | $cmake_crosscompiling | ||
| 133 | set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} ) | ||
| 134 | set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} ) | ||
| 135 | set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} ) | ||
| 136 | set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} ) | ||
| 137 | set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} ) | ||
| 138 | set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} ) | ||
| 139 | set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} ) | ||
| 140 | find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED ) | ||
| 141 | |||
| 142 | set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" ) | ||
| 143 | set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" ) | ||
| 144 | set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" ) | ||
| 145 | set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" ) | ||
| 146 | set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" ) | ||
| 147 | set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" ) | ||
| 148 | set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" ) | ||
| 149 | set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" ) | ||
| 150 | |||
| 151 | # only search in the paths provided so cmake doesn't pick | ||
| 152 | # up libraries and tools from the native build machine | ||
| 153 | set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${COREBASE}/scripts ${HOSTTOOLS_DIR} ) | ||
| 154 | set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY ) | ||
| 155 | set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} ) | ||
| 156 | set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY ) | ||
| 157 | set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY ) | ||
| 158 | set( CMAKE_PROGRAM_PATH "/" ) | ||
| 159 | |||
| 160 | $cmake_sysroot | ||
| 161 | |||
| 162 | # Use qt.conf settings | ||
| 163 | set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf ) | ||
| 164 | |||
| 165 | # We need to set the rpath to the correct directory as cmake does not provide any | ||
| 166 | # directory as rpath by default | ||
| 167 | set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} ) | ||
| 168 | |||
| 169 | # Use RPATHs relative to build directory for reproducibility | ||
| 170 | set( CMAKE_BUILD_RPATH_USE_ORIGIN ON ) | ||
| 171 | |||
| 172 | # Use our cmake modules | ||
| 173 | list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/") | ||
| 174 | |||
| 175 | # add for non /usr/lib libdir, e.g. /usr/lib64 | ||
| 176 | set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir}) | ||
| 177 | |||
| 178 | # add include dir to implicit includes in case it differs from /usr/include | ||
| 179 | list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir}) | ||
| 180 | list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir}) | ||
| 181 | |||
| 182 | EOF | ||
| 183 | cat > ${WORKDIR}/toolchain-native.cmake <<EOF | ||
| 184 | set( CMAKE_C_COMPILER ${OECMAKE_NATIVE_C_COMPILER} ) | ||
| 185 | set( CMAKE_CXX_COMPILER ${OECMAKE_NATIVE_CXX_COMPILER} ) | ||
| 186 | set( CMAKE_ASM_COMPILER ${OECMAKE_NATIVE_C_COMPILER} ) | ||
| 187 | set( CMAKE_AR ${OECMAKE_NATIVE_AR} CACHE FILEPATH "Archiver" ) | ||
| 188 | set( CMAKE_RANLIB ${OECMAKE_NATIVE_RANLIB} CACHE FILEPATH "Archive Indexer" ) | ||
| 189 | set( CMAKE_NM ${OECMAKE_NATIVE_NM} CACHE FILEPATH "Symbol Lister" ) | ||
| 190 | set( CMAKE_C_FLAGS "${OECMAKE_NATIVE_C_FLAGS}" CACHE STRING "CFLAGS" ) | ||
| 191 | set( CMAKE_CXX_FLAGS "${OECMAKE_NATIVE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" ) | ||
| 192 | set( CMAKE_ASM_FLAGS "${OECMAKE_NATIVE_C_FLAGS}" CACHE STRING "ASM FLAGS" ) | ||
| 193 | set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_NATIVE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" ) | ||
| 194 | set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_NATIVE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" ) | ||
| 195 | set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_NATIVE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" ) | ||
| 196 | set( CMAKE_C_LINK_FLAGS "${OECMAKE_NATIVE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" ) | ||
| 197 | set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_NATIVE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" ) | ||
| 198 | |||
| 199 | set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_NATIVE} ) | ||
| 200 | set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY ) | ||
| 201 | set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM BOTH ) | ||
| 202 | set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY ) | ||
| 203 | set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY ) | ||
| 204 | |||
| 205 | # Use native cmake modules | ||
| 206 | list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR_NATIVE}/cmake/Modules/") | ||
| 207 | |||
| 208 | # add for non /usr/lib libdir, e.g. /usr/lib64 | ||
| 209 | set( CMAKE_LIBRARY_PATH ${STAGING_BASE_LIBDIR_NATIVE} ${STAGING_LIBDIR_NATIVE}) | ||
| 210 | |||
| 211 | # add include dir to implicit includes in case it differs from /usr/include | ||
| 212 | list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${STAGING_INCDIR_NATIVE}) | ||
| 213 | list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${STAGING_INCDIR_NATIVE}) | ||
| 214 | |||
| 215 | # The assignments above override CFLAGS and CXXFLAGS from the environment but | ||
| 216 | # not LDFLAGS, which ends up in CMAKE_EXE_LINKER_FLAGS. This then means our | ||
| 217 | # native builds use target flags, and can fail. | ||
| 218 | # | ||
| 219 | # As there are a number of variables that are set from LDFLAGS, | ||
| 220 | # clear it at source. | ||
| 221 | # | ||
| 222 | # https://cmake.org/cmake/help/latest/envvar/LDFLAGS.html | ||
| 223 | unset(ENV{LDFLAGS}) | ||
| 224 | EOF | ||
| 225 | } | ||
| 226 | |||
| 227 | cmake_do_generate_toolchain_file:append:toolchain-clang() { | ||
| 228 | cat >> ${WORKDIR}/toolchain.cmake <<EOF | ||
| 229 | set( CMAKE_CLANG_TIDY ${HOST_PREFIX}clang-tidy ) | ||
| 230 | EOF | ||
| 231 | } | ||
| 232 | |||
| 233 | addtask generate_toolchain_file after do_patch before do_configure | ||
| 234 | |||
| 235 | CONFIGURE_FILES = "CMakeLists.txt *.cmake" | ||
| 236 | |||
| 237 | do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}" | ||
| 238 | |||
| 239 | OECMAKE_ARGS = "\ | ||
| 240 | -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \ | ||
| 241 | -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \ | ||
| 242 | -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \ | ||
| 243 | -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \ | ||
| 244 | -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \ | ||
| 245 | -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \ | ||
| 246 | -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \ | ||
| 247 | -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \ | ||
| 248 | -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \ | ||
| 249 | -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \ | ||
| 250 | -DPYTHON_EXECUTABLE:PATH=${PYTHON} \ | ||
| 251 | -DPython_EXECUTABLE:PATH=${PYTHON} \ | ||
| 252 | -DPython3_EXECUTABLE:PATH=${PYTHON} \ | ||
| 253 | -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \ | ||
| 254 | -DCMAKE_INSTALL_SO_NO_EXE=0 \ | ||
| 255 | -DCMAKE_TOOLCHAIN_FILE:FILEPATH=${WORKDIR}/toolchain.cmake \ | ||
| 256 | -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \ | ||
| 257 | -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \ | ||
| 258 | -DFETCHCONTENT_FULLY_DISCONNECTED=ON \ | ||
| 259 | -DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON \ | ||
| 260 | " | ||
| 261 | |||
| 262 | cmake_do_configure() { | ||
| 263 | if [ "${OECMAKE_BUILDPATH}" ]; then | ||
| 264 | bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build." | ||
| 265 | fi | ||
| 266 | |||
| 267 | if [ "${S}" = "${B}" ]; then | ||
| 268 | find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete | ||
| 269 | fi | ||
| 270 | |||
| 271 | # Just like autotools, cmake can use a site file to cache results that need generated binaries to run | ||
| 272 | if [ -e ${WORKDIR}/site-file.cmake ] ; then | ||
| 273 | oecmake_sitefile="-C ${WORKDIR}/site-file.cmake" | ||
| 274 | else | ||
| 275 | oecmake_sitefile= | ||
| 276 | fi | ||
| 277 | |||
| 278 | cmake \ | ||
| 279 | ${OECMAKE_GENERATOR_ARGS} \ | ||
| 280 | $oecmake_sitefile \ | ||
| 281 | ${OECMAKE_SOURCEPATH} \ | ||
| 282 | ${OECMAKE_ARGS} \ | ||
| 283 | ${EXTRA_OECMAKE} \ | ||
| 284 | -Wno-dev | ||
| 285 | } | ||
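Project-specific configure options are normally passed through EXTRA_OECMAKE, which is appended after OECMAKE_ARGS in the invocation above; a sketch with hypothetical cache entries:

    # hypothetical recipe fragment
    EXTRA_OECMAKE = "-DENABLE_FOO=ON -DBUILD_TESTING=OFF"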
| 286 | |||
| 287 | # To disable verbose cmake logs for a given recipe, or globally via config metadata (e.g. local.conf), | ||
| 288 | # add the following: | ||
| 289 | # | ||
| 290 | # CMAKE_VERBOSE = "" | ||
| 291 | # | ||
| 292 | |||
| 293 | CMAKE_VERBOSE ??= "VERBOSE=1" | ||
| 294 | |||
| 295 | # Then run do_compile again | ||
| 296 | cmake_runcmake_build() { | ||
| 297 | bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD} | ||
| 298 | eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD} | ||
| 299 | } | ||
| 300 | |||
| 301 | # Install an already-generated project binary tree. Not checking the compile | ||
| 302 | # dependencies again is particularly important for SDK use cases. | ||
| 303 | cmake_runcmake_install() { | ||
| 304 | bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --install '${B}' | ||
| 305 | eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --install '${B}' | ||
| 306 | } | ||
| 307 | |||
| 308 | cmake_do_compile() { | ||
| 309 | cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE} | ||
| 310 | } | ||
| 311 | |||
| 312 | cmake_do_install() { | ||
| 313 | if [ "${OECMAKE_TARGET_INSTALL}" = "install" ]; then | ||
| 314 | DESTDIR='${D}' cmake_runcmake_install | ||
| 315 | else | ||
| 316 | # Legacy path which supports also custom install targets | ||
| 317 | DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL} | ||
| 318 | fi | ||
| 319 | } | ||
| 320 | |||
| 321 | EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file | ||
diff --git a/meta/classes-recipe/cml1.bbclass b/meta/classes-recipe/cml1.bbclass deleted file mode 100644 index 3c2b4da4af..0000000000 --- a/meta/classes-recipe/cml1.bbclass +++ /dev/null | |||
| @@ -1,121 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Returns all the elements from SRC_URI that are .cfg files | ||
| 8 | def find_cfgs(d): | ||
| 9 | sources=src_patches(d, True) | ||
| 10 | sources_list=[] | ||
| 11 | for s in sources: | ||
| 12 | if s.endswith('.cfg'): | ||
| 13 | sources_list.append(s) | ||
| 14 | |||
| 15 | return sources_list | ||
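Classes inheriting cml1 (the kernel classes, for instance) use find_cfgs() to locate Kconfig fragments shipped with the recipe; a sketch with a hypothetical fragment name:

    # hypothetical recipe fragment: the .cfg file is picked up by find_cfgs()
    SRC_URI += "file://enable-foo.cfg"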
| 16 | |||
| 17 | cml1_do_configure() { | ||
| 18 | set -e | ||
| 19 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS | ||
| 20 | yes '' | oe_runmake oldconfig | ||
| 21 | } | ||
| 22 | |||
| 23 | EXPORT_FUNCTIONS do_configure | ||
| 24 | |||
| 25 | inherit terminal | ||
| 26 | |||
| 27 | OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC" | ||
| 28 | HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}" | ||
| 29 | HOSTLDFLAGS = "${BUILD_LDFLAGS}" | ||
| 30 | CROSS_CURSES_LIB = "-lncurses -ltinfo" | ||
| 31 | CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"' | ||
| 32 | TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo" | ||
| 33 | |||
| 34 | KCONFIG_CONFIG_COMMAND ??= "menuconfig ${EXTRA_OEMAKE}" | ||
| 35 | KCONFIG_CONFIG_ENABLE_MENUCONFIG ??= "true" | ||
| 36 | KCONFIG_CONFIG_ROOTDIR ??= "${B}" | ||
| 37 | python do_menuconfig() { | ||
| 38 | import shutil | ||
| 39 | |||
| 40 | if not bb.utils.to_boolean(d.getVar("KCONFIG_CONFIG_ENABLE_MENUCONFIG")): | ||
| 41 | bb.fatal("do_menuconfig is disabled, please check the KCONFIG_CONFIG_ENABLE_MENUCONFIG variable.") | ||
| 42 | return | ||
| 43 | |||
| 44 | config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config") | ||
| 45 | configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig") | ||
| 46 | |||
| 47 | try: | ||
| 48 | mtime = os.path.getmtime(config) | ||
| 49 | shutil.copy(config, configorig) | ||
| 50 | except OSError: | ||
| 51 | mtime = 0 | ||
| 52 | |||
| 53 | # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native) | ||
| 54 | d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig") | ||
| 55 | d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig") | ||
| 56 | d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}") | ||
| 57 | d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1") | ||
| 58 | # ensure that environment variables are overwritten with this task's 'd' values | ||
| 59 | d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR") | ||
| 60 | |||
| 61 | oe_terminal("sh -c 'make %s; if [ $? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'), | ||
| 62 | d.getVar('PN') + ' Configuration', d) | ||
| 63 | |||
| 64 | try: | ||
| 65 | newmtime = os.path.getmtime(config) | ||
| 66 | except OSError: | ||
| 67 | newmtime = 0 | ||
| 68 | |||
| 69 | if newmtime > mtime: | ||
| 70 | bb.plain("Changed configuration saved at:\n %s\nRecompile will be forced" % config) | ||
| 71 | bb.build.write_taint('do_compile', d) | ||
| 72 | } | ||
| 73 | do_menuconfig[depends] += "ncurses-native:do_populate_sysroot" | ||
| 74 | do_menuconfig[nostamp] = "1" | ||
| 75 | do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}" | ||
| 76 | addtask menuconfig after do_configure | ||
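The task is run interactively from the build environment, for example against a kconfig-based recipe such as the kernel:

    $ bitbake -c menuconfig virtual/kernel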
| 77 | |||
| 78 | python do_diffconfig() { | ||
| 79 | import shutil | ||
| 80 | import subprocess | ||
| 81 | |||
| 82 | workdir = d.getVar('WORKDIR') | ||
| 83 | fragment = workdir + '/fragment.cfg' | ||
| 84 | configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig") | ||
| 85 | config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config") | ||
| 86 | |||
| 87 | try: | ||
| 88 | md5newconfig = bb.utils.md5_file(configorig) | ||
| 89 | md5config = bb.utils.md5_file(config) | ||
| 90 | isdiff = md5newconfig != md5config | ||
| 91 | except IOError as e: | ||
| 92 | bb.fatal("No config files found. Did you run menuconfig?\n%s" % e) | ||
| 93 | |||
| 94 | if isdiff: | ||
| 95 | statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment | ||
| 96 | # No need to check the exit code as we know it's going to be | ||
| 97 | # non-zero, but that's what we expect. | ||
| 98 | subprocess.call(statement, shell=True) | ||
| 99 | |||
| 100 | bb.plain("Config fragment has been dumped into:\n %s" % fragment) | ||
| 101 | else: | ||
| 102 | if os.path.exists(fragment): | ||
| 103 | os.unlink(fragment) | ||
| 104 | } | ||
| 105 | |||
| 106 | do_diffconfig[nostamp] = "1" | ||
| 107 | do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}" | ||
| 108 | addtask diffconfig | ||
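A typical workflow pairs the two tasks: edit the configuration, then extract the delta against the saved .config.orig:

    $ bitbake -c menuconfig virtual/kernel    # edit and save .config
    $ bitbake -c diffconfig virtual/kernel    # dumps the delta to ${WORKDIR}/fragment.cfg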
| 109 | |||
| 110 | do_showconfig() { | ||
| 111 | bbplain "Config file written to ${KCONFIG_CONFIG_ROOTDIR}/.config" | ||
| 112 | } | ||
| 113 | do_showconfig[nostamp] = "1" | ||
| 114 | addtask showconfig after do_configure | ||
| 115 | |||
| 116 | do_savedefconfig() { | ||
| 117 | bbplain "Saving defconfig to:\n${B}/defconfig" | ||
| 118 | oe_runmake -C ${B} savedefconfig | ||
| 119 | } | ||
| 120 | do_savedefconfig[nostamp] = "1" | ||
| 121 | addtask savedefconfig after do_configure | ||
diff --git a/meta/classes-recipe/compress_doc.bbclass b/meta/classes-recipe/compress_doc.bbclass deleted file mode 100644 index d603caf858..0000000000 --- a/meta/classes-recipe/compress_doc.bbclass +++ /dev/null | |||
| @@ -1,269 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Compress man pages in ${mandir} and info pages in ${infodir} | ||
| 8 | # | ||
| 9 | # 1. The doc will be compressed to gz format by default. | ||
| 10 | # | ||
| 11 | # 2. It will automatically correct the compressed doc which is not | ||
| 12 | # in ${DOC_COMPRESS} but in ${DOC_COMPRESS_LIST} to the format | ||
| 13 | # of ${DOC_COMPRESS} policy | ||
| 14 | # | ||
| 15 | # 3. It is easy to add a new type compression by editing | ||
| 16 | # local.conf, such as: | ||
| 17 | # DOC_COMPRESS_LIST:append = ' abc' | ||
| 18 | # DOC_COMPRESS = 'abc' | ||
| 19 | # DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***' | ||
| 20 | # DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***' | ||
| 21 | |||
| 22 | # All supported compression policy | ||
| 23 | DOC_COMPRESS_LIST ?= "gz xz bz2" | ||
| 24 | |||
| 25 | # Compression policy, must be one of ${DOC_COMPRESS_LIST} | ||
| 26 | DOC_COMPRESS ?= "gz" | ||
| 27 | |||
| 28 | # Compression shell command | ||
| 29 | DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n' | ||
| 30 | DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9" | ||
| 31 | DOC_COMPRESS_CMD[xz] ?= "xz -v" | ||
| 32 | |||
| 33 | # Decompression shell command | ||
| 34 | DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v' | ||
| 35 | DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v" | ||
| 36 | DOC_DECOMPRESS_CMD[xz] ?= "unxz -v" | ||
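Switching the policy to another built-in format only requires overriding the default, e.g. in local.conf:

    # compress man and info pages with xz instead of the default gz
    DOC_COMPRESS = "xz"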
| 37 | |||
| 38 | PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives" | ||
| 39 | python package_do_compress_doc() { | ||
| 40 | compress_mode = d.getVar('DOC_COMPRESS') | ||
| 41 | compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split() | ||
| 42 | if compress_mode not in compress_list: | ||
| 43 | bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list)) | ||
| 44 | |||
| 45 | dvar = d.getVar('PKGD') | ||
| 46 | compress_cmds = {} | ||
| 47 | decompress_cmds = {} | ||
| 48 | for mode in compress_list: | ||
| 49 | compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode) | ||
| 50 | decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode) | ||
| 51 | |||
| 52 | mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir")) | ||
| 53 | if os.path.exists(mandir): | ||
| 54 | # Decompress doc files whose format is not compress_mode | ||
| 55 | decompress_doc(mandir, compress_mode, decompress_cmds) | ||
| 56 | compress_doc(mandir, compress_mode, compress_cmds) | ||
| 57 | |||
| 58 | infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir")) | ||
| 59 | if os.path.exists(infodir): | ||
| 60 | # Decompress doc files whose format is not compress_mode | ||
| 61 | decompress_doc(infodir, compress_mode, decompress_cmds) | ||
| 62 | compress_doc(infodir, compress_mode, compress_cmds) | ||
| 63 | } | ||
| 64 | |||
| 65 | def _get_compress_format(file, compress_format_list): | ||
| 66 | for compress_format in compress_format_list: | ||
| 67 | compress_suffix = '.' + compress_format | ||
| 68 | if file.endswith(compress_suffix): | ||
| 69 | return compress_format | ||
| 70 | |||
| 71 | return '' | ||
| 72 | |||
| 73 | # Collect hardlinks into a dict; each element in the dict lists hardlinks | ||
| 74 | # which point to the same doc file. | ||
| 75 | # {hardlink10: [hardlink11, hardlink12],,,} | ||
| 76 | # The hardlink10, hardlink11 and hardlink12 are the same file. | ||
| 77 | def _collect_hardlink(hardlink_dict, file): | ||
| 78 | for hardlink in hardlink_dict: | ||
| 79 | # Add to the existing hardlink | ||
| 80 | if os.path.samefile(hardlink, file): | ||
| 81 | hardlink_dict[hardlink].append(file) | ||
| 82 | return hardlink_dict | ||
| 83 | |||
| 84 | hardlink_dict[file] = [] | ||
| 85 | return hardlink_dict | ||
| 86 | |||
| 87 | def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False): | ||
| 88 | import subprocess | ||
| 89 | for target in hardlink_dict: | ||
| 90 | if decompress: | ||
| 91 | compress_format = _get_compress_format(target, shell_cmds.keys()) | ||
| 92 | cmd = "%s -f %s" % (shell_cmds[compress_format], target) | ||
| 93 | bb.note('decompress hardlink %s' % target) | ||
| 94 | else: | ||
| 95 | cmd = "%s -f %s" % (shell_cmds[compress_mode], target) | ||
| 96 | bb.note('compress hardlink %s' % target) | ||
| 97 | (retval, output) = subprocess.getstatusoutput(cmd) | ||
| 98 | if retval: | ||
| 99 | bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) | ||
| 100 | return | ||
| 101 | |||
| 102 | for hardlink_dup in hardlink_dict[target]: | ||
| 103 | if decompress: | ||
| 104 | # Remove compress suffix | ||
| 105 | compress_suffix = '.' + compress_format | ||
| 106 | new_hardlink = hardlink_dup[:-len(compress_suffix)] | ||
| 107 | new_target = target[:-len(compress_suffix)] | ||
| 108 | else: | ||
| 109 | # Append compress suffix | ||
| 110 | compress_suffix = '.' + compress_mode | ||
| 111 | new_hardlink = hardlink_dup + compress_suffix | ||
| 112 | new_target = target + compress_suffix | ||
| 113 | |||
| 114 | bb.note('hardlink %s-->%s' % (new_hardlink, new_target)) | ||
| 115 | if not os.path.exists(new_hardlink): | ||
| 116 | os.link(new_target, new_hardlink) | ||
| 117 | if os.path.exists(hardlink_dup): | ||
| 118 | os.unlink(hardlink_dup) | ||
| 119 | |||
| 120 | def _process_symlink(file, compress_format, decompress=False): | ||
| 121 | compress_suffix = '.' + compress_format | ||
| 122 | if decompress: | ||
| 123 | # Remove compress suffix | ||
| 124 | new_linkname = file[:-len(compress_suffix)] | ||
| 125 | new_source = os.readlink(file)[:-len(compress_suffix)] | ||
| 126 | else: | ||
| 127 | # Append compress suffix | ||
| 128 | new_linkname = file + compress_suffix | ||
| 129 | new_source = os.readlink(file) + compress_suffix | ||
| 130 | |||
| 131 | bb.note('symlink %s-->%s' % (new_linkname, new_source)) | ||
| 132 | if not os.path.exists(new_linkname): | ||
| 133 | os.symlink(new_source, new_linkname) | ||
| 134 | |||
| 135 | os.unlink(file) | ||
| 136 | |||
| 137 | def _is_info(file): | ||
| 138 | flags = '.info .info-'.split() | ||
| 139 | for flag in flags: | ||
| 140 | if flag in os.path.basename(file): | ||
| 141 | return True | ||
| 142 | |||
| 143 | return False | ||
| 144 | |||
| 145 | def _is_man(file): | ||
| 146 | import re | ||
| 147 | |||
| 148 | # This refers to the MANSECT variable in man(1.6g)'s man.config: | ||
| 149 | # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o" | ||
| 150 | # The name must not start with '.' and must contain one of the above colon-separated elements | ||
| 151 | p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)') | ||
| 152 | if p.search(file): | ||
| 153 | return True | ||
| 154 | |||
| 155 | return False | ||
| 156 | |||
| 157 | def _is_compress_doc(file, compress_format_list): | ||
| 158 | compress_format = _get_compress_format(file, compress_format_list) | ||
| 159 | compress_suffix = '.' + compress_format | ||
| 160 | if file.endswith(compress_suffix): | ||
| 161 | # Remove the compress suffix | ||
| 162 | uncompress_file = file[:-len(compress_suffix)] | ||
| 163 | if _is_info(uncompress_file) or _is_man(uncompress_file): | ||
| 164 | return True, compress_format | ||
| 165 | |||
| 166 | return False, '' | ||
| 167 | |||
| 168 | def compress_doc(topdir, compress_mode, compress_cmds): | ||
| 169 | import subprocess | ||
| 170 | hardlink_dict = {} | ||
| 171 | for root, dirs, files in os.walk(topdir): | ||
| 172 | for f in files: | ||
| 173 | file = os.path.join(root, f) | ||
| 174 | if os.path.isdir(file): | ||
| 175 | continue | ||
| 176 | |||
| 177 | if _is_info(file) or _is_man(file): | ||
| 178 | # Symlink | ||
| 179 | if os.path.islink(file): | ||
| 180 | _process_symlink(file, compress_mode) | ||
| 181 | # Hardlink | ||
| 182 | elif os.lstat(file).st_nlink > 1: | ||
| 183 | _collect_hardlink(hardlink_dict, file) | ||
| 184 | # Normal file | ||
| 185 | elif os.path.isfile(file): | ||
| 186 | cmd = "%s %s" % (compress_cmds[compress_mode], file) | ||
| 187 | (retval, output) = subprocess.getstatusoutput(cmd) | ||
| 188 | if retval: | ||
| 189 | bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) | ||
| 190 | continue | ||
| 191 | bb.note('compress file %s' % file) | ||
| 192 | |||
| 193 | _process_hardlink(hardlink_dict, compress_mode, compress_cmds) | ||
| 194 | |||
| 195 | # Decompress doc files whose format is not compress_mode | ||
| 196 | def decompress_doc(topdir, compress_mode, decompress_cmds): | ||
| 197 | import subprocess | ||
| 198 | hardlink_dict = {} | ||
| 199 | decompress = True | ||
| 200 | for root, dirs, files in os.walk(topdir): | ||
| 201 | for f in files: | ||
| 202 | file = os.path.join(root, f) | ||
| 203 | if os.path.isdir(file): | ||
| 204 | continue | ||
| 205 | |||
| 206 | res, compress_format = _is_compress_doc(file, decompress_cmds.keys()) | ||
| 207 | # Decompress files whose format is not compress_mode | ||
| 208 | if res and compress_mode!=compress_format: | ||
| 209 | # Symlink | ||
| 210 | if os.path.islink(file): | ||
| 211 | _process_symlink(file, compress_format, decompress) | ||
| 212 | # Hardlink | ||
| 213 | elif os.lstat(file).st_nlink > 1: | ||
| 214 | _collect_hardlink(hardlink_dict, file) | ||
| 215 | # Normal file | ||
| 216 | elif os.path.isfile(file): | ||
| 217 | cmd = "%s %s" % (decompress_cmds[compress_format], file) | ||
| 218 | (retval, output) = subprocess.getstatusoutput(cmd) | ||
| 219 | if retval: | ||
| 220 | bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) | ||
| 221 | continue | ||
| 222 | bb.note('decompress file %s' % file) | ||
| 223 | |||
| 224 | _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress) | ||
| 225 | |||
| 226 | python compress_doc_updatealternatives () { | ||
| 227 | if not bb.data.inherits_class('update-alternatives', d): | ||
| 228 | return | ||
| 229 | |||
| 230 | mandir = d.getVar("mandir") | ||
| 231 | infodir = d.getVar("infodir") | ||
| 232 | compress_mode = d.getVar('DOC_COMPRESS') | ||
| 233 | for pkg in (d.getVar('PACKAGES') or "").split(): | ||
| 234 | old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split() | ||
| 235 | new_names = [] | ||
| 236 | for old_name in old_names: | ||
| 237 | old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name) | ||
| 238 | old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \ | ||
| 239 | d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \ | ||
| 240 | d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \ | ||
| 241 | d.getVar('ALTERNATIVE_TARGET') or \ | ||
| 242 | old_link | ||
| 243 | # Sometimes old_target is specified as relative to the link name. | ||
| 244 | old_target = os.path.join(os.path.dirname(old_link), old_target) | ||
| 245 | |||
| 246 | # Update the alternatives entries that refer to compressed docs | ||
| 247 | if mandir in old_target or infodir in old_target: | ||
| 248 | new_name = old_name + '.' + compress_mode | ||
| 249 | new_link = old_link + '.' + compress_mode | ||
| 250 | new_target = old_target + '.' + compress_mode | ||
| 251 | d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name) | ||
| 252 | d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link) | ||
| 253 | if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name): | ||
| 254 | d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) | ||
| 255 | d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target) | ||
| 256 | elif d.getVarFlag('ALTERNATIVE_TARGET', old_name): | ||
| 257 | d.delVarFlag('ALTERNATIVE_TARGET', old_name) | ||
| 258 | d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target) | ||
| 259 | elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg): | ||
| 260 | d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target) | ||
| 261 | elif d.getVar('ALTERNATIVE_TARGET'): | ||
| 262 | d.setVar('ALTERNATIVE_TARGET', new_target) | ||
| 263 | |||
| 264 | new_names.append(new_name) | ||
| 265 | |||
| 266 | if new_names: | ||
| 267 | d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names)) | ||
| 268 | } | ||
| 269 | |||
diff --git a/meta/classes-recipe/core-image.bbclass b/meta/classes-recipe/core-image.bbclass deleted file mode 100644 index 994185ed4c..0000000000 --- a/meta/classes-recipe/core-image.bbclass +++ /dev/null | |||
| @@ -1,95 +0,0 @@ | |||
| 1 | # Common code for generating core reference images | ||
| 2 | # | ||
| 3 | # Copyright (C) 2007-2011 Linux Foundation | ||
| 4 | # | ||
| 5 | # SPDX-License-Identifier: MIT | ||
| 6 | |||
| 7 | # IMAGE_FEATURES control the content of the core reference images | ||
| 8 | # | ||
| 9 | # By default we install packagegroup-core-boot and packagegroup-base-extended packages; | ||
| 10 | # this gives us a working (console only) rootfs. | ||
| 11 | # | ||
| 12 | # Available IMAGE_FEATURES: | ||
| 13 | # | ||
| 14 | # These features install additional packages into the rootfs: | ||
| 15 | # - eclipse-debug - Eclipse remote debugging support | ||
| 16 | # - hwcodecs - hardware acceleration codecs (specified in MACHINE_HWCODECS) | ||
| 17 | # - nfs-client - NFS client | ||
| 18 | # - nfs-server - NFS server | ||
| 19 | # - package-management - installs package management tools and preserves the package manager database | ||
| 20 | # - splash - bootup splash screen | ||
| 21 | # - ssh-server-dropbear - SSH server (dropbear) | ||
| 22 | # - ssh-server-openssh - SSH server (openssh) | ||
| 23 | # - tools-debug - debugging tools | ||
| 24 | # - tools-profile - profiling tools | ||
| 25 | # - tools-sdk - SDK (C/C++ compiler, autotools, etc.) | ||
| 26 | # - tools-testapps - tools usable to make some device tests | ||
| 27 | # - weston - Weston Wayland compositor | ||
| 28 | # - x11 - X server | ||
| 29 | # - x11-base - X server with minimal environment | ||
| 30 | # - x11-sato - OpenedHand Sato environment | ||
| 31 | # | ||
| 32 | # These features install complementary packages for all installed packages in the rootfs: | ||
| 33 | # - dbg-pkgs - debug symbol packages | ||
| 34 | # - dev-pkgs - development packages (headers, etc.) | ||
| 35 | # - doc-pkgs - documentation packages | ||
| 36 | # - lic-pkgs - license packages, requires LICENSE_CREATE_PACKAGE="1" to be set when building packages too | ||
| 37 | # - ptest-pkgs - ptest packages for all ptest-enabled recipes | ||
| 38 | # | ||
| 39 | # These features install complementary development packages: | ||
| 40 | # - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass | ||
| 41 | # - zsh-completion-pkgs - zsh-completion packages | ||
| 42 | # | ||
| 43 | # These features tweak the behavior of the rootfs: | ||
| 44 | # - overlayfs-etc - sets up /etc in overlayfs | ||
| 45 | # - read-only-rootfs - tweaks an image to support a read-only rootfs | ||
| 46 | # - read-only-rootfs-delayed-postinsts - supports post-install scripts with read-only-rootfs | ||
| 47 | # - stateless-rootfs - systemctl-native is not run, image is populated by systemd at runtime | ||
| 48 | # | ||
| 49 | # These features are for development purposes (some were previously part of the debug-tweaks feature): | ||
| 50 | # - allow-empty-password - users can have an empty password (debug-tweaks) | ||
| 51 | # - allow-root-login - the root user can login (debug-tweaks) | ||
| 52 | # - empty-root-password - the root user has no password set (debug-tweaks) | ||
| 53 | # - post-install-logging - log the output of postinstall scriptlets (debug-tweaks) | ||
| 54 | # - serial-autologin-root - with 'empty-root-password': autologin 'root' on the serial console | ||
| 55 | # | ||
| 56 | FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug" | ||
| 57 | FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}" | ||
| 58 | FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client" | ||
| 59 | FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server" | ||
| 60 | FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear" | ||
| 61 | FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh" | ||
| 62 | FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug" | ||
| 63 | FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile" | ||
| 64 | FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target" | ||
| 65 | FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps" | ||
| 66 | FEATURE_PACKAGES_weston = "packagegroup-core-weston" | ||
| 67 | FEATURE_PACKAGES_x11 = "packagegroup-core-x11" | ||
| 68 | FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base" | ||
| 69 | FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato" | ||
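An image recipe selects from the features above via IMAGE_FEATURES; a minimal sketch with an arbitrary choice of features:

    # hypothetical image recipe fragment
    IMAGE_FEATURES += "ssh-server-dropbear tools-debug"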
| 70 | |||
| 71 | # IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2' | ||
| 72 | # Including image feature foo would replace the image features bar1 and bar2 | ||
| 73 | IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear" | ||
| 74 | # Do not install openssh complementary packages if either packagegroup-core-ssh-dropbear or dropbear | ||
| 75 | # is installed, to avoid an openssh-dropbear conflict. | ||
| 76 | # See [Yocto #14858] for more information. | ||
| 77 | PACKAGE_EXCLUDE_COMPLEMENTARY:append = "${@bb.utils.contains_any('PACKAGE_INSTALL', 'packagegroup-core-ssh-dropbear dropbear', ' openssh', '' , d)}" | ||
| 78 | |||
| 79 | # IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2' | ||
| 80 | # An error exception would be raised if both image features foo and bar1(or bar2) are included | ||
| 81 | |||
| 82 | MACHINE_HWCODECS ??= "" | ||
| 83 | |||
| 84 | CORE_IMAGE_BASE_INSTALL = '\ | ||
| 85 | packagegroup-core-boot \ | ||
| 86 | packagegroup-base-extended \ | ||
| 87 | \ | ||
| 88 | ${CORE_IMAGE_EXTRA_INSTALL} \ | ||
| 89 | ' | ||
| 90 | |||
| 91 | CORE_IMAGE_EXTRA_INSTALL ?= "" | ||
| 92 | |||
| 93 | IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}" | ||
| 94 | |||
| 95 | inherit image | ||
diff --git a/meta/classes-recipe/cpan-base.bbclass b/meta/classes-recipe/cpan-base.bbclass deleted file mode 100644 index 1db0a4ded6..0000000000 --- a/meta/classes-recipe/cpan-base.bbclass +++ /dev/null | |||
| @@ -1,33 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # cpan-base provides various Perl-related information needed for building | ||
| 9 | # CPAN modules | ||
| 10 | # | ||
| 11 | FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5" | ||
| 12 | |||
| 13 | DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}" | ||
| 14 | RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}" | ||
| 15 | |||
| 16 | inherit perl-version | ||
| 17 | |||
| 18 | def is_target(d): | ||
| 19 | if not bb.data.inherits_class('native', d): | ||
| 20 | return "yes" | ||
| 21 | return "no" | ||
| 22 | |||
| 23 | PERLLIBDIRS = "${libdir}/perl5" | ||
| 24 | PERLLIBDIRS:class-native = "${libdir}/perl5" | ||
| 25 | |||
| 26 | def cpan_upstream_check_pattern(d): | ||
| 27 | for x in (d.getVar('SRC_URI') or '').split(' '): | ||
| 28 | if x.startswith("https://cpan.metacpan.org"): | ||
| 29 | _pattern = x.split('/')[-1].replace(d.getVar('PV'), r'(?P<pver>\d+.\d+)') | ||
| 30 | return _pattern | ||
| 31 | return '' | ||
| 32 | |||
| 33 | UPSTREAM_CHECK_REGEX ?= "${@cpan_upstream_check_pattern(d)}" | ||
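As an illustration (module name and version are hypothetical): for a recipe with PV = "1.23" fetching https://cpan.metacpan.org/authors/id/X/XY/XYZ/Foo-Bar-1.23.tar.gz, cpan_upstream_check_pattern() would yield roughly:

    # computed UPSTREAM_CHECK_REGEX
    Foo-Bar-(?P<pver>\d+.\d+).tar.gz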
diff --git a/meta/classes-recipe/cpan.bbclass b/meta/classes-recipe/cpan.bbclass deleted file mode 100644 index bb76a5b326..0000000000 --- a/meta/classes-recipe/cpan.bbclass +++ /dev/null | |||
| @@ -1,71 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This is for perl modules that use the old Makefile.PL build system | ||
| 9 | # | ||
| 10 | inherit cpan-base perlnative | ||
| 11 | |||
| 12 | EXTRA_CPANFLAGS ?= "" | ||
| 13 | EXTRA_PERLFLAGS ?= "" | ||
| 14 | |||
| 15 | # Env var which tells perl if it should use host (no) or target (yes) settings | ||
| 16 | export PERLCONFIGTARGET = "${@is_target(d)}" | ||
| 17 | |||
| 18 | # Env var which tells perl where the perl include files are | ||
| 19 | export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE" | ||
| 20 | export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}" | ||
| 21 | export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}" | ||
| 22 | export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/" | ||
| 23 | export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/" | ||
| 24 | |||
| 25 | cpan_do_configure () { | ||
| 26 | yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS} | ||
| 27 | |||
| 28 | # Makefile.PLs can exit with success without generating a | ||
| 29 | # Makefile, e.g. in cases of missing configure time | ||
| 30 | # dependencies. This is considered a best practice by | ||
| 31 | # cpantesters.org. See: | ||
| 32 | # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes | ||
| 33 | # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html | ||
| 34 | [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL" | ||
| 35 | |||
| 36 | if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then | ||
| 37 | . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh | ||
| 38 | # Use find since there can be a Makefile generated for each Makefile.PL | ||
| 39 | for f in `find -name Makefile.PL`; do | ||
| 40 | f2=`echo $f | sed -e 's/.PL//'` | ||
| 41 | test -f $f2 || continue | ||
| 42 | sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \ | ||
| 43 | -e 's/perl.real/perl/' \ | ||
| 44 | -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \ | ||
| 45 | $f2 | ||
| 46 | done | ||
| 47 | fi | ||
| 48 | } | ||
| 49 | |||
| 50 | do_configure:append:class-target() { | ||
| 51 | find . -name Makefile | xargs sed -E -i \ | ||
| 52 | -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g' | ||
| 53 | } | ||
| 54 | |||
| 55 | do_configure:append:class-nativesdk() { | ||
| 56 | find . -name Makefile | xargs sed -E -i \ | ||
| 57 | -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g' | ||
| 58 | } | ||
| 59 | |||
| 60 | cpan_do_compile () { | ||
| 61 | oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}" | ||
| 62 | } | ||
| 63 | |||
| 64 | cpan_do_install () { | ||
| 65 | oe_runmake DESTDIR="${D}" install_vendor | ||
| 66 | for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do | ||
| 67 | sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT | ||
| 68 | done | ||
| 69 | } | ||
| 70 | |||
| 71 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
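A minimal recipe sketch using this class (the module is hypothetical; license and source checksums are omitted for brevity):

    # hypothetical recipe for a Makefile.PL-based CPAN module
    SUMMARY = "Foo::Bar Perl module"
    SRC_URI = "https://cpan.metacpan.org/authors/id/X/XY/XYZ/Foo-Bar-${PV}.tar.gz"
    S = "${UNPACKDIR}/Foo-Bar-${PV}"
    inherit cpan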
diff --git a/meta/classes-recipe/cpan_build.bbclass b/meta/classes-recipe/cpan_build.bbclass deleted file mode 100644 index 026859b6c7..0000000000 --- a/meta/classes-recipe/cpan_build.bbclass +++ /dev/null | |||
| @@ -1,47 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This is for perl modules that use the new Build.PL build system | ||
| 9 | # | ||
| 10 | inherit cpan-base perlnative | ||
| 11 | |||
| 12 | EXTRA_CPAN_BUILD_FLAGS ?= "" | ||
| 13 | |||
| 14 | # Env var which tells perl if it should use host (no) or target (yes) settings | ||
| 15 | export PERLCONFIGTARGET = "${@is_target(d)}" | ||
| 16 | export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}" | ||
| 17 | export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/" | ||
| 18 | export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/" | ||
| 19 | export LD = "${CCLD}" | ||
| 20 | |||
| 21 | cpan_build_do_configure () { | ||
| 22 | if [ "${@is_target(d)}" = "yes" ]; then | ||
| 23 | # build for target | ||
| 24 | . ${STAGING_LIBDIR}/perl5/config.sh | ||
| 25 | fi | ||
| 26 | |||
| 27 | perl Build.PL --installdirs vendor --destdir ${D} \ | ||
| 28 | ${EXTRA_CPAN_BUILD_FLAGS} | ||
| 29 | |||
| 30 | # Build.PLs can exit with success without generating a | ||
| 31 | # Build, e.g. in cases of missing configure time | ||
| 32 | # dependencies. This is considered a best practice by | ||
| 33 | # cpantesters.org. See: | ||
| 34 | # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes | ||
| 35 | # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html | ||
| 36 | [ -e Build ] || bbfatal "No Build was generated by Build.PL" | ||
| 37 | } | ||
| 38 | |||
| 39 | cpan_build_do_compile () { | ||
| 40 | perl Build --perl "${bindir}/perl" verbose=1 | ||
| 41 | } | ||
| 42 | |||
| 43 | cpan_build_do_install () { | ||
| 44 | perl Build install --destdir ${D} | ||
| 45 | } | ||
| 46 | |||
| 47 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
diff --git a/meta/classes-recipe/create-spdx-image-3.0.bbclass b/meta/classes-recipe/create-spdx-image-3.0.bbclass deleted file mode 100644 index 636ab14eb0..0000000000 --- a/meta/classes-recipe/create-spdx-image-3.0.bbclass +++ /dev/null | |||
| @@ -1,85 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | # SPDX image tasks | ||
| 7 | |||
| 8 | SPDX_ROOTFS_PACKAGES = "${SPDXDIR}/rootfs-packages.json" | ||
| 9 | SPDXIMAGEDEPLOYDIR = "${SPDXDIR}/image-deploy" | ||
| 10 | SPDXROOTFSDEPLOY = "${SPDXDIR}/rootfs-deploy" | ||
| 11 | |||
| 12 | python spdx_collect_rootfs_packages() { | ||
| 13 | import json | ||
| 14 | from pathlib import Path | ||
| 15 | from oe.rootfs import image_list_installed_packages | ||
| 16 | |||
| 17 | root_packages_file = Path(d.getVar("SPDX_ROOTFS_PACKAGES")) | ||
| 18 | |||
| 19 | packages = image_list_installed_packages(d) | ||
| 20 | if not packages: | ||
| 21 | packages = {} | ||
| 22 | |||
| 23 | root_packages_file.parent.mkdir(parents=True, exist_ok=True) | ||
| 24 | with root_packages_file.open("w") as f: | ||
| 25 | json.dump(packages, f) | ||
| 26 | } | ||
| 27 | ROOTFS_POSTUNINSTALL_COMMAND =+ "spdx_collect_rootfs_packages" | ||
| 28 | |||
| 29 | python do_create_rootfs_spdx() { | ||
| 30 | import oe.spdx30_tasks | ||
| 31 | oe.spdx30_tasks.create_rootfs_spdx(d) | ||
| 32 | } | ||
| 33 | addtask do_create_rootfs_spdx after do_rootfs before do_image | ||
| 34 | SSTATETASKS += "do_create_rootfs_spdx" | ||
| 35 | do_create_rootfs_spdx[sstate-inputdirs] = "${SPDXROOTFSDEPLOY}" | ||
| 36 | do_create_rootfs_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}" | ||
| 37 | do_create_rootfs_spdx[recrdeptask] += "do_create_spdx do_create_package_spdx" | ||
| 38 | do_create_rootfs_spdx[cleandirs] += "${SPDXROOTFSDEPLOY}" | ||
| 39 | do_create_rootfs_spdx[file-checksums] += "${SPDX3_DEP_FILES}" | ||
| 40 | |||
| 41 | python do_create_rootfs_spdx_setscene() { | ||
| 42 | sstate_setscene(d) | ||
| 43 | } | ||
| 44 | addtask do_create_rootfs_spdx_setscene | ||
| 45 | |||
| 46 | python do_create_image_spdx() { | ||
| 47 | import oe.spdx30_tasks | ||
| 48 | oe.spdx30_tasks.create_image_spdx(d) | ||
| 49 | } | ||
| 50 | addtask do_create_image_spdx after do_image_complete do_create_rootfs_spdx before do_build | ||
| 51 | SSTATETASKS += "do_create_image_spdx" | ||
| 52 | SSTATE_SKIP_CREATION:task-create-image-spdx = "1" | ||
| 53 | do_create_image_spdx[sstate-inputdirs] = "${SPDXIMAGEWORK}" | ||
| 54 | do_create_image_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}" | ||
| 55 | do_create_image_spdx[cleandirs] = "${SPDXIMAGEWORK}" | ||
| 56 | do_create_image_spdx[dirs] = "${SPDXIMAGEWORK}" | ||
| 57 | do_create_image_spdx[file-checksums] += "${SPDX3_DEP_FILES}" | ||
| 58 | do_create_image_spdx[vardeps] += "\ | ||
| 59 | SPDX_IMAGE_PURPOSE \ | ||
| 60 | " | ||
| 61 | |||
| 62 | python do_create_image_spdx_setscene() { | ||
| 63 | sstate_setscene(d) | ||
| 64 | } | ||
| 65 | addtask do_create_image_spdx_setscene | ||
| 66 | |||
| 67 | |||
| 68 | python do_create_image_sbom_spdx() { | ||
| 69 | import oe.spdx30_tasks | ||
| 70 | oe.spdx30_tasks.create_image_sbom_spdx(d) | ||
| 71 | } | ||
| 72 | addtask do_create_image_sbom_spdx after do_create_rootfs_spdx do_create_image_spdx before do_build | ||
| 73 | SSTATETASKS += "do_create_image_sbom_spdx" | ||
| 74 | SSTATE_SKIP_CREATION:task-create-image-sbom = "1" | ||
| 75 | do_create_image_sbom_spdx[sstate-inputdirs] = "${SPDXIMAGEDEPLOYDIR}" | ||
| 76 | do_create_image_sbom_spdx[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" | ||
| 77 | do_create_image_sbom_spdx[stamp-extra-info] = "${MACHINE_ARCH}" | ||
| 78 | do_create_image_sbom_spdx[cleandirs] = "${SPDXIMAGEDEPLOYDIR}" | ||
| 79 | do_create_image_sbom_spdx[recrdeptask] += "do_create_spdx do_create_package_spdx" | ||
| 80 | do_create_image_sbom_spdx[file-checksums] += "${SPDX3_DEP_FILES}" | ||
| 81 | |||
| 82 | python do_create_image_sbom_spdx_setscene() { | ||
| 83 | sstate_setscene(d) | ||
| 84 | } | ||
| 85 | addtask do_create_image_sbom_spdx_setscene | ||
diff --git a/meta/classes-recipe/create-spdx-sdk-3.0.bbclass b/meta/classes-recipe/create-spdx-sdk-3.0.bbclass deleted file mode 100644 index e5f220cdfa..0000000000 --- a/meta/classes-recipe/create-spdx-sdk-3.0.bbclass +++ /dev/null | |||
| @@ -1,74 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 5 | # | ||
| 6 | # SPDX SDK tasks | ||
| 7 | |||
| 8 | do_populate_sdk[recrdeptask] += "do_create_spdx do_create_package_spdx" | ||
| 9 | do_populate_sdk[cleandirs] += "${SPDXSDKWORK}" | ||
| 10 | do_populate_sdk[postfuncs] += "sdk_create_sbom" | ||
| 11 | do_populate_sdk[file-checksums] += "${SPDX3_DEP_FILES}" | ||
| 12 | POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_create_spdx" | ||
| 13 | POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_create_spdx" | ||
| 14 | |||
| 15 | do_populate_sdk_ext[recrdeptask] += "do_create_spdx do_create_package_spdx" | ||
| 16 | do_populate_sdk_ext[cleandirs] += "${SPDXSDKEXTWORK}" | ||
| 17 | do_populate_sdk_ext[postfuncs] += "sdk_ext_create_sbom" | ||
| 18 | do_populate_sdk_ext[file-checksums] += "${SPDX3_DEP_FILES}" | ||
| 19 | POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk-ext = " sdk_ext_host_create_spdx" | ||
| 20 | POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk-ext = " sdk_ext_target_create_spdx" | ||
| 21 | |||
| 22 | python sdk_host_create_spdx() { | ||
| 23 | from pathlib import Path | ||
| 24 | import oe.spdx30_tasks | ||
| 25 | spdx_work_dir = Path(d.getVar('SPDXSDKWORK')) | ||
| 26 | |||
| 27 | oe.spdx30_tasks.sdk_create_spdx(d, "host", spdx_work_dir, d.getVar("TOOLCHAIN_OUTPUTNAME")) | ||
| 28 | } | ||
| 29 | |||
| 30 | python sdk_target_create_spdx() { | ||
| 31 | from pathlib import Path | ||
| 32 | import oe.spdx30_tasks | ||
| 33 | spdx_work_dir = Path(d.getVar('SPDXSDKWORK')) | ||
| 34 | |||
| 35 | oe.spdx30_tasks.sdk_create_spdx(d, "target", spdx_work_dir, d.getVar("TOOLCHAIN_OUTPUTNAME")) | ||
| 36 | } | ||
| 37 | |||
| 38 | python sdk_ext_host_create_spdx() { | ||
| 39 | from pathlib import Path | ||
| 40 | import oe.spdx30_tasks | ||
| 41 | spdx_work_dir = Path(d.getVar('SPDXSDKEXTWORK')) | ||
| 42 | |||
| 43 | # TODO: This doesn't seem to work | ||
| 44 | oe.spdx30_tasks.sdk_create_spdx(d, "host", spdx_work_dir, d.getVar("TOOLCHAINEXT_OUTPUTNAME")) | ||
| 45 | } | ||
| 46 | |||
| 47 | python sdk_ext_target_create_spdx() { | ||
| 48 | from pathlib import Path | ||
| 49 | import oe.spdx30_tasks | ||
| 50 | spdx_work_dir = Path(d.getVar('SPDXSDKEXTWORK')) | ||
| 51 | |||
| 52 | # TODO: This doesn't seem to work | ||
| 53 | oe.spdx30_tasks.sdk_create_spdx(d, "target", spdx_work_dir, d.getVar("TOOLCHAINEXT_OUTPUTNAME")) | ||
| 54 | } | ||
| 55 | |||
| 56 | |||
| 57 | python sdk_create_sbom() { | ||
| 58 | from pathlib import Path | ||
| 59 | import oe.spdx30_tasks | ||
| 60 | sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR")) | ||
| 61 | spdx_work_dir = Path(d.getVar('SPDXSDKWORK')) | ||
| 62 | |||
| 63 | oe.spdx30_tasks.create_sdk_sbom(d, sdk_deploydir, spdx_work_dir, d.getVar("TOOLCHAIN_OUTPUTNAME")) | ||
| 64 | } | ||
| 65 | |||
| 66 | python sdk_ext_create_sbom() { | ||
| 67 | from pathlib import Path | ||
| 68 | import oe.spdx30_tasks | ||
| 69 | sdk_deploydir = Path(d.getVar("SDKEXTDEPLOYDIR")) | ||
| 70 | spdx_work_dir = Path(d.getVar('SPDXSDKEXTWORK')) | ||
| 71 | |||
| 72 | oe.spdx30_tasks.create_sdk_sbom(d, sdk_deploydir, spdx_work_dir, d.getVar("TOOLCHAINEXT_OUTPUTNAME")) | ||
| 73 | } | ||
| 74 | |||
diff --git a/meta/classes-recipe/cross-canadian.bbclass b/meta/classes-recipe/cross-canadian.bbclass deleted file mode 100644 index 059d9aa95f..0000000000 --- a/meta/classes-recipe/cross-canadian.bbclass +++ /dev/null | |||
| @@ -1,200 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | # NOTE - When using this class, the user is responsible for ensuring that | ||
| 7 | # TRANSLATED_TARGET_ARCH is included in PN. This ensures that if TARGET_ARCH | ||
| 8 | # is changed, another nativesdk xxx-canadian-cross can be installed. | ||
| 9 | # | ||
| 10 | |||
| 11 | |||
| 12 | # SDK packages are built either explicitly by the user, | ||
| 13 | # or indirectly via dependency. No need to be in 'world'. | ||
| 14 | EXCLUDE_FROM_WORLD = "1" | ||
| 15 | NATIVESDKLIBC ?= "libc-glibc" | ||
| 16 | LIBCOVERRIDE = ":${NATIVESDKLIBC}" | ||
| 17 | CLASSOVERRIDE = "class-cross-canadian" | ||
| 18 | STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" | ||
| 19 | |||
| 20 | # | ||
| 21 | # Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS | ||
| 22 | # | ||
| 23 | PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}" | ||
| 24 | BASECANADIANEXTRAOS ?= "linux-musl" | ||
| 25 | CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}" | ||
| 26 | CANADIANEXTRAVENDOR = "" | ||
| 27 | MODIFYTOS ??= "1" | ||
| 28 | python () { | ||
| 29 | archs = d.getVar('PACKAGE_ARCHS').split() | ||
| 30 | sdkarchs = [] | ||
| 31 | for arch in archs: | ||
| 32 | sdkarchs.append(arch + '-${SDKPKGSUFFIX}') | ||
| 33 | d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs)) | ||
| 34 | |||
| 35 | # Allow the following code segment to be disabled, e.g. in meta-environment | ||
| 36 | if d.getVar("MODIFYTOS") != "1": | ||
| 37 | return | ||
| 38 | |||
| 39 | if d.getVar("TCLIBC") in [ 'baremetal', 'newlib', 'picolibc' ]: | ||
| 40 | return | ||
| 41 | |||
| 42 | tos = d.getVar("TARGET_OS") | ||
| 43 | tos_known = ["mingw32"] | ||
| 44 | extralibcs = [""] | ||
| 45 | if "musl" in d.getVar("BASECANADIANEXTRAOS"): | ||
| 46 | extralibcs.append("musl") | ||
| 47 | if "android" in tos: | ||
| 48 | extralibcs.append("android") | ||
| 49 | for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]: | ||
| 50 | for libc in extralibcs: | ||
| 51 | entry = "linux" | ||
| 52 | if variant and libc: | ||
| 53 | entry = entry + "-" + libc + variant | ||
| 54 | elif variant: | ||
| 55 | entry = entry + "-gnu" + variant | ||
| 56 | elif libc: | ||
| 57 | entry = entry + "-" + libc | ||
| 58 | tos_known.append(entry) | ||
| 59 | if tos not in tos_known: | ||
| 60 | bb.fatal("Building cross-canadian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS")) | ||
| 61 | |||
| 62 | for n in ["PROVIDES", "DEPENDS"]: | ||
| 63 | d.setVar(n, d.getVar(n)) | ||
| 64 | d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN")) | ||
| 65 | for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]: | ||
| 66 | n = prefix + "_FOR_TARGET" | ||
| 67 | d.setVar(n, d.getVar(n)) | ||
| 68 | # This is a bit ugly. We need to zero the LIBC/ABI extension, which will change TARGET_OS; | ||
| 69 | # however, we need the old value in some variables. We expand those here first. | ||
| 70 | tarch = d.getVar("TARGET_ARCH") | ||
| 71 | if tarch == "x86_64": | ||
| 72 | d.setVar("LIBCEXTENSION", "") | ||
| 73 | d.setVar("ABIEXTENSION", "") | ||
| 74 | d.appendVar("CANADIANEXTRAOS", " linux-gnux32") | ||
| 75 | for extraos in d.getVar("BASECANADIANEXTRAOS").split(): | ||
| 76 | d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32") | ||
| 77 | elif tarch == "powerpc": | ||
| 78 | # PowerPC can build "linux" and "linux-gnuspe" | ||
| 79 | d.setVar("LIBCEXTENSION", "") | ||
| 80 | d.setVar("ABIEXTENSION", "") | ||
| 81 | d.appendVar("CANADIANEXTRAOS", " linux-gnuspe") | ||
| 82 | for extraos in d.getVar("BASECANADIANEXTRAOS").split(): | ||
| 83 | d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe") | ||
| 84 | elif tarch == "mips64": | ||
| 85 | d.appendVar("CANADIANEXTRAOS", " linux-gnun32") | ||
| 86 | for extraos in d.getVar("BASECANADIANEXTRAOS").split(): | ||
| 87 | d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32") | ||
| 88 | if tarch == "arm" or tarch == "armeb": | ||
| 89 | d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi") | ||
| 90 | d.setVar("TARGET_OS", "linux-gnueabi") | ||
| 91 | else: | ||
| 92 | d.setVar("TARGET_OS", "linux") | ||
| 93 | |||
| 94 | # Also need to handle multilib target vendors | ||
| 95 | vendors = d.getVar("CANADIANEXTRAVENDOR") | ||
| 96 | if not vendors: | ||
| 97 | vendors = all_multilib_tune_values(d, 'TARGET_VENDOR') | ||
| 98 | origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL") | ||
| 99 | if origvendor: | ||
| 100 | d.setVar("TARGET_VENDOR", origvendor) | ||
| 101 | if origvendor not in vendors.split(): | ||
| 102 | vendors = origvendor + " " + vendors | ||
| 103 | d.setVar("CANADIANEXTRAVENDOR", vendors) | ||
| 104 | } | ||
| 105 | MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}" | ||
| 106 | |||
| 107 | INHIBIT_DEFAULT_DEPS = "1" | ||
| 108 | |||
| 109 | STAGING_DIR_HOST = "${RECIPE_SYSROOT}" | ||
| 110 | |||
| 111 | TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}" | ||
| 112 | |||
| 113 | PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}" | ||
| 114 | PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/" | ||
| 115 | |||
| 116 | HOST_ARCH = "${SDK_ARCH}" | ||
| 117 | HOST_VENDOR = "${SDK_VENDOR}" | ||
| 118 | HOST_OS = "${SDK_OS}" | ||
| 119 | HOST_PREFIX = "${SDK_PREFIX}" | ||
| 120 | HOST_CC_ARCH = "${SDK_CC_ARCH}" | ||
| 121 | HOST_LD_ARCH = "${SDK_LD_ARCH}" | ||
| 122 | HOST_AS_ARCH = "${SDK_AS_ARCH}" | ||
| 123 | |||
| 124 | #assign DPKG_ARCH | ||
| 125 | DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}" | ||
| 126 | |||
| 127 | CPPFLAGS = "${BUILDSDK_CPPFLAGS}" | ||
| 128 | CFLAGS = "${BUILDSDK_CFLAGS}" | ||
| 129 | CXXFLAGS = "${BUILDSDK_CFLAGS}" | ||
| 130 | LDFLAGS = "${BUILDSDK_LDFLAGS} \ | ||
| 131 | -Wl,-rpath-link,${STAGING_LIBDIR}/.. \ | ||
| 132 | -Wl,-rpath,${libdir}/.. " | ||
| 133 | |||
| 134 | # | ||
| 135 | # We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit | ||
| 136 | # binaries | ||
| 137 | # | ||
| 138 | DEPENDS:append = " chrpath-replacement-native" | ||
| 139 | EXTRANATIVEPATH += "chrpath-native" | ||
| 140 | |||
| 141 | # Path mangling needed by the cross packaging | ||
| 142 | # Note that we use := here to ensure that libdir and includedir are | ||
| 143 | # target paths. | ||
| 144 | target_base_prefix := "${base_prefix}" | ||
| 145 | target_prefix := "${prefix}" | ||
| 146 | target_exec_prefix := "${exec_prefix}" | ||
| 147 | target_base_libdir = "${target_base_prefix}/${baselib}" | ||
| 148 | target_libdir = "${target_exec_prefix}/${baselib}" | ||
| 149 | target_includedir := "${includedir}" | ||
| 150 | |||
| 151 | # Change to place files in SDKPATH | ||
| 152 | base_prefix = "${SDKPATHNATIVE}" | ||
| 153 | prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" | ||
| 154 | exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" | ||
| 155 | bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" | ||
| 156 | sbindir = "${bindir}" | ||
| 157 | base_bindir = "${bindir}" | ||
| 158 | base_sbindir = "${bindir}" | ||
| 159 | libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" | ||
| 160 | libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" | ||
| 161 | |||
| 162 | FILES:${PN} = "${prefix}" | ||
| 163 | |||
| 164 | export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig" | ||
| 165 | export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}" | ||
| 166 | |||
| 167 | do_populate_sysroot[stamp-extra-info] = "" | ||
| 168 | do_packagedata[stamp-extra-info] = "" | ||
| 169 | |||
| 170 | USE_NLS = "${SDKUSE_NLS}" | ||
| 171 | |||
| 172 | # We have to use TARGET_ARCH but we care about the absolute value | ||
| 173 | # and not any particular tune that is enabled. | ||
| 174 | TARGET_ARCH[vardepsexclude] = "TUNE_ARCH" | ||
| 175 | |||
| 176 | PKGDATA_DIR = "${PKGDATA_DIR_SDK}" | ||
| 177 | # If MLPREFIX is set by multilib code, shlibs | ||
| 178 | # points to the wrong place so force it | ||
| 179 | SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2" | ||
| 180 | SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2" | ||
| 181 | |||
| 182 | cross_canadian_bindirlinks () { | ||
| 183 | for i in linux ${CANADIANEXTRAOS} | ||
| 184 | do | ||
| 185 | for v in ${CANADIANEXTRAVENDOR} | ||
| 186 | do | ||
| 187 | d=${D}${bindir}/../${TARGET_ARCH}$v-$i | ||
| 188 | if [ -d $d ]; | ||
| 189 | then | ||
| 190 | continue | ||
| 191 | fi | ||
| 192 | install -d $d | ||
| 193 | for j in `ls ${D}${bindir}` | ||
| 194 | do | ||
| 195 | p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,` | ||
| 196 | ln -s ../${TARGET_SYS}/$j $d/$p | ||
| 197 | done | ||
| 198 | done | ||
| 199 | done | ||
| 200 | } | ||
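To illustrate the NOTE at the top of this class, a minimal sketch of a canadian-cross recipe that folds TRANSLATED_TARGET_ARCH into PN; the recipe name here is hypothetical, but gcc-cross-canadian followed the same pattern:

    # mytool-cross-canadian_1.0.bb (hypothetical)
    PN = "mytool-cross-canadian-${TRANSLATED_TARGET_ARCH}"
    inherit cross-canadian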
diff --git a/meta/classes-recipe/cross.bbclass b/meta/classes-recipe/cross.bbclass deleted file mode 100644 index 9abf166e50..0000000000 --- a/meta/classes-recipe/cross.bbclass +++ /dev/null | |||
| @@ -1,103 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit relocatable | ||
| 8 | |||
| 9 | # Cross packages are built indirectly via dependency, so there is | ||
| 10 | # no need for them to be a direct target of 'world'. | ||
| 11 | EXCLUDE_FROM_WORLD = "1" | ||
| 12 | |||
| 13 | CLASSOVERRIDE = "class-cross" | ||
| 14 | PACKAGES = "" | ||
| 15 | PACKAGES_DYNAMIC = "" | ||
| 16 | PACKAGES_DYNAMIC:class-native = "" | ||
| 17 | |||
| 18 | HOST_ARCH = "${BUILD_ARCH}" | ||
| 19 | HOST_VENDOR = "${BUILD_VENDOR}" | ||
| 20 | HOST_OS = "${BUILD_OS}" | ||
| 21 | HOST_PREFIX = "${BUILD_PREFIX}" | ||
| 22 | HOST_CC_ARCH = "${BUILD_CC_ARCH}" | ||
| 23 | HOST_LD_ARCH = "${BUILD_LD_ARCH}" | ||
| 24 | HOST_AS_ARCH = "${BUILD_AS_ARCH}" | ||
| 25 | |||
| 26 | # No strip sysroot when DEBUG_BUILD is enabled | ||
| 27 | INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}" | ||
| 28 | |||
| 29 | export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64" | ||
| 30 | |||
| 31 | STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}" | ||
| 32 | |||
| 33 | PACKAGE_ARCH = "${BUILD_ARCH}" | ||
| 34 | |||
| 35 | MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}" | ||
| 36 | |||
| 37 | export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig" | ||
| 38 | export PKG_CONFIG_SYSROOT_DIR = "" | ||
| 39 | |||
| 40 | TARGET_CPPFLAGS = "" | ||
| 41 | TARGET_CFLAGS = "" | ||
| 42 | TARGET_CXXFLAGS = "" | ||
| 43 | TARGET_LDFLAGS = "" | ||
| 44 | |||
| 45 | CPPFLAGS = "${BUILD_CPPFLAGS}" | ||
| 46 | CFLAGS = "${BUILD_CFLAGS}" | ||
| 47 | CXXFLAGS = "${BUILD_CFLAGS}" | ||
| 48 | LDFLAGS = "${BUILD_LDFLAGS}" | ||
| 49 | |||
| 50 | TOOLCHAIN_OPTIONS = "" | ||
| 51 | |||
| 52 | # This class encodes staging paths into its scripts data so it can only be | ||
| 53 | # reused if we manipulate the paths. | ||
| 54 | SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}" | ||
| 55 | |||
| 56 | # Path mangling needed by the cross packaging | ||
| 57 | # Note that we use := here to ensure that libdir and includedir are | ||
| 58 | # target paths. | ||
| 59 | target_base_prefix := "${root_prefix}" | ||
| 60 | target_prefix := "${prefix}" | ||
| 61 | target_exec_prefix := "${exec_prefix}" | ||
| 62 | target_base_libdir = "${target_base_prefix}/${baselib}" | ||
| 63 | target_libdir = "${target_exec_prefix}/${baselib}" | ||
| 64 | target_includedir := "${includedir}" | ||
| 65 | |||
| 66 | # Overrides for paths | ||
| 67 | CROSS_TARGET_SYS_DIR = "${TARGET_SYS}" | ||
| 68 | prefix = "${STAGING_DIR_NATIVE}${prefix_native}" | ||
| 69 | base_prefix = "${STAGING_DIR_NATIVE}" | ||
| 70 | exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}" | ||
| 71 | bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}" | ||
| 72 | sbindir = "${bindir}" | ||
| 73 | base_bindir = "${bindir}" | ||
| 74 | base_sbindir = "${bindir}" | ||
| 75 | libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}" | ||
| 76 | libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}" | ||
| 77 | |||
| 78 | do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/" | ||
| 79 | do_packagedata[stamp-extra-info] = "" | ||
| 80 | |||
| 81 | USE_NLS = "no" | ||
| 82 | |||
| 83 | CC = "${BUILD_CC}" | ||
| 84 | CXX = "${BUILD_CXX}" | ||
| 85 | FC = "${BUILD_FC}" | ||
| 86 | CPP = "${BUILD_CPP}" | ||
| 87 | LD = "${BUILD_LD}" | ||
| 88 | CCLD = "${BUILD_CCLD}" | ||
| 89 | AR = "${BUILD_AR}" | ||
| 90 | AS = "${BUILD_AS}" | ||
| 91 | RANLIB = "${BUILD_RANLIB}" | ||
| 92 | STRIP = "${BUILD_STRIP}" | ||
| 93 | NM = "${BUILD_NM}" | ||
| 94 | |||
| 95 | inherit nopackages | ||
| 96 | |||
| 97 | python do_addto_recipe_sysroot () { | ||
| 98 | bb.build.exec_func("extend_recipe_sysroot", d) | ||
| 99 | } | ||
| 100 | addtask addto_recipe_sysroot after do_populate_sysroot | ||
| 101 | do_addto_recipe_sysroot[deptask] = "do_populate_sysroot" | ||
| 102 | |||
| 103 | PATH:prepend = "${COREBASE}/scripts/cross-intercept:" | ||
diff --git a/meta/classes-recipe/crosssdk.bbclass b/meta/classes-recipe/crosssdk.bbclass deleted file mode 100644 index 3541c2c393..0000000000 --- a/meta/classes-recipe/crosssdk.bbclass +++ /dev/null | |||
| @@ -1,58 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | BB_DEFER_BBCLASSES:remove = "cross" | ||
| 8 | inherit cross | ||
| 9 | |||
| 10 | CLASSOVERRIDE = "class-crosssdk" | ||
| 11 | NATIVESDKLIBC ?= "libc-glibc" | ||
| 12 | LIBCOVERRIDE = ":${NATIVESDKLIBC}" | ||
| 13 | MACHINEOVERRIDES = "" | ||
| 14 | PACKAGE_ARCH = "${SDK_ARCH}" | ||
| 15 | |||
| 16 | python () { | ||
| 17 | # set TUNE_PKGARCH to SDK_ARCH | ||
| 18 | d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH')) | ||
| 19 | # Set features here to prevent appends and distro features backfill | ||
| 20 | # from modifying nativesdk distro features | ||
| 21 | features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split()) | ||
| 22 | filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split()) | ||
| 23 | d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered))) | ||
| 24 | } | ||
| 25 | |||
| 26 | STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" | ||
| 27 | |||
| 28 | # This class encodes staging paths into its scripts data so it can only be | ||
| 29 | # reused if we manipulate the paths. | ||
| 30 | SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}" | ||
| 31 | |||
| 32 | TARGET_ARCH = "${SDK_ARCH}" | ||
| 33 | TARGET_VENDOR = "${SDK_VENDOR}" | ||
| 34 | TARGET_OS = "${SDK_OS}" | ||
| 35 | TARGET_PREFIX = "${SDK_PREFIX}" | ||
| 36 | TARGET_CC_ARCH = "${SDK_CC_ARCH}" | ||
| 37 | TARGET_LD_ARCH = "${SDK_LD_ARCH}" | ||
| 38 | TARGET_AS_ARCH = "${SDK_AS_ARCH}" | ||
| 39 | TARGET_CPPFLAGS = "" | ||
| 40 | TARGET_CFLAGS = "" | ||
| 41 | TARGET_CXXFLAGS = "" | ||
| 42 | TARGET_LDFLAGS = "" | ||
| 43 | TARGET_FPU = "" | ||
| 44 | |||
| 45 | |||
| 46 | target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}" | ||
| 47 | target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}" | ||
| 48 | target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}" | ||
| 49 | target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" | ||
| 50 | target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" | ||
| 51 | baselib = "lib" | ||
| 52 | |||
| 53 | do_packagedata[stamp-extra-info] = "" | ||
| 54 | |||
| 55 | # Need to force this to ensure consistency across architectures | ||
| 56 | EXTRA_OECONF_GCC_FLOAT = "" | ||
| 57 | |||
| 58 | USE_NLS = "no" | ||
diff --git a/meta/classes-recipe/cython.bbclass b/meta/classes-recipe/cython.bbclass deleted file mode 100644 index 9ae7a29134..0000000000 --- a/meta/classes-recipe/cython.bbclass +++ /dev/null | |||
| @@ -1,8 +0,0 @@ | |||
| 1 | DEPENDS:append = " python3-cython-native" | ||
| 2 | |||
| 3 | do_compile[postfuncs] = "strip_cython_metadata" | ||
| 4 | strip_cython_metadata() { | ||
| 5 | # Remove the Cython Metadata headers that we don't need after the build, and | ||
| 6 | # may contain build paths. | ||
| 7 | find ${S} \( -name "*.c" -o -name "*.cpp" \) -print0 | xargs --no-run-if-empty --null sed -i -e "/BEGIN: Cython Metadata/,/END: Cython Metadata/d" | ||
| 8 | } | ||
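A minimal sketch of a consumer, assuming a setuptools-based Python extension (the recipe name is hypothetical):

    # python3-fastmod_1.0.bb (hypothetical)
    inherit setuptools3 cython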
diff --git a/meta/classes-recipe/deploy.bbclass b/meta/classes-recipe/deploy.bbclass deleted file mode 100644 index f56fe98d6d..0000000000 --- a/meta/classes-recipe/deploy.bbclass +++ /dev/null | |||
| @@ -1,18 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | DEPLOYDIR = "${WORKDIR}/deploy-${PN}" | ||
| 8 | SSTATETASKS += "do_deploy" | ||
| 9 | do_deploy[sstate-inputdirs] = "${DEPLOYDIR}" | ||
| 10 | do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" | ||
| 11 | |||
| 12 | python do_deploy_setscene () { | ||
| 13 | sstate_setscene(d) | ||
| 14 | } | ||
| 15 | addtask do_deploy_setscene | ||
| 16 | do_deploy[dirs] = "${B}" | ||
| 17 | do_deploy[cleandirs] = "${DEPLOYDIR}" | ||
| 18 | do_deploy[stamp-extra-info] = "${MACHINE_ARCH}" | ||
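The usual consumer pattern is to provide a do_deploy implementation that copies artifacts into DEPLOYDIR and to register the task. A minimal sketch (the artifact name is hypothetical):

    inherit deploy

    do_deploy() {
        install -m 0644 ${B}/firmware.bin ${DEPLOYDIR}/
    }
    addtask deploy after do_compile before do_build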
diff --git a/meta/classes-recipe/devicetree.bbclass b/meta/classes-recipe/devicetree.bbclass deleted file mode 100644 index ce9d008aac..0000000000 --- a/meta/classes-recipe/devicetree.bbclass +++ /dev/null | |||
| @@ -1,169 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This bbclass implements device tree compilation for user provided device tree | ||
| 8 | # sources. The compilation of the device tree sources is the same as the kernel | ||
| 9 | # device tree compilation process; this includes being able to include sources | ||
| 10 | # from the kernel such as soc dtsi files or header files such as gpio.h. In | ||
| 11 | # addition to device trees this bbclass also handles compilation of device tree | ||
| 12 | # overlays. | ||
| 13 | # | ||
| 14 | # The output of this class behaves similarly to how kernel-devicetree.bbclass | ||
| 15 | # operates in that the output files are installed into /boot/devicetree. | ||
| 16 | # However, this class deliberately separates the deployed device trees into the | ||
| 17 | # 'devicetree' subdirectory. This prevents clashes with the kernel-devicetree | ||
| 18 | # output. Additionally, the device trees are populated into the sysroot so | ||
| 19 | # that other recipes can access them via the sysroot. | ||
| 20 | |||
| 21 | SECTION ?= "bsp" | ||
| 22 | |||
| 23 | # The default inclusion of kernel device tree includes and headers means that | ||
| 24 | # device trees built with them are at least GPL-2.0-only (and in some cases dual | ||
| 25 | # licensed). Default to GPL-2.0-only if the recipe does not specify a license. | ||
| 26 | LICENSE ?= "GPL-2.0-only" | ||
| 27 | LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6" | ||
| 28 | |||
| 29 | INHIBIT_DEFAULT_DEPS = "1" | ||
| 30 | DEPENDS += "dtc-native" | ||
| 31 | |||
| 32 | inherit deploy kernel-arch | ||
| 33 | |||
| 34 | COMPATIBLE_MACHINE ?= "^$" | ||
| 35 | |||
| 36 | PROVIDES = "virtual/dtb" | ||
| 37 | |||
| 38 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 39 | |||
| 40 | SYSROOT_DIRS += "/boot/devicetree" | ||
| 41 | FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo" | ||
| 42 | |||
| 43 | S = "${UNPACKDIR}" | ||
| 44 | B = "${WORKDIR}/build" | ||
| 45 | |||
| 46 | # Default kernel includes; these represent what is normally used for in-kernel | ||
| 47 | # sources. | ||
| 48 | KERNEL_INCLUDE ??= " \ | ||
| 49 | ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \ | ||
| 50 | ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \ | ||
| 51 | ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \ | ||
| 52 | " | ||
| 53 | |||
| 54 | DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion." | ||
| 55 | DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}" | ||
| 56 | DT_FILES_PATH[doc] = "Path to the directory containing dts files to build. Defaults to source directory." | ||
| 57 | DT_FILES_PATH ?= "${S}" | ||
| 58 | DT_FILES[doc] = "Space-separated list of dts or dtb files (relative to DT_FILES_PATH) to build. If empty, all dts files are built." | ||
| 59 | DT_FILES ?= "" | ||
| 60 | |||
| 61 | DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot." | ||
| 62 | DT_PADDING_SIZE ??= "0x3000" | ||
| 63 | DT_RESERVED_MAP[doc] = "Number of reserved map entries." | ||
| 64 | DT_RESERVED_MAP ??= "8" | ||
| 65 | DT_BOOT_CPU[doc] = "The boot cpu, defaults to 0" | ||
| 66 | DT_BOOT_CPU ??= "0" | ||
| 67 | |||
| 68 | DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}" | ||
| 69 | DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp" | ||
| 70 | DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@" | ||
| 71 | DTC_OFLAGS ?= "-p 0 -@ -H epapr" | ||
| 72 | |||
| 73 | python () { | ||
| 74 | if d.getVar("KERNEL_INCLUDE"): | ||
| 75 | # auto add dependency on kernel tree, but only if kernel include paths | ||
| 76 | # are specified. | ||
| 77 | d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure") | ||
| 78 | } | ||
| 79 | |||
| 80 | def expand_includes(varname, d): | ||
| 81 | import glob | ||
| 82 | includes = set() | ||
| 83 | # expand all includes with glob | ||
| 84 | for i in (d.getVar(varname) or "").split(): | ||
| 85 | for g in glob.glob(i): | ||
| 86 | if os.path.isdir(g): # only add directories to include path | ||
| 87 | includes.add(g) | ||
| 88 | return includes | ||
| 89 | |||
| 90 | def devicetree_source_is_overlay(path): | ||
| 91 | # determine if a dts file is an overlay by checking if it uses "/plugin/;" | ||
| 92 | with open(path, "r") as f: | ||
| 93 | for i in f: | ||
| 94 | if i.startswith("/plugin/;"): | ||
| 95 | return True | ||
| 96 | return False | ||
| 97 | |||
| 98 | def devicetree_compile(dtspath, includes, d): | ||
| 99 | import subprocess | ||
| 100 | dts = os.path.basename(dtspath) | ||
| 101 | dtname = os.path.splitext(dts)[0] | ||
| 102 | bb.note("Processing {0} [{1}]".format(dtname, dts)) | ||
| 103 | |||
| 104 | # preprocess | ||
| 105 | ppargs = d.getVar("BUILD_CPP").split() | ||
| 106 | ppargs += (d.getVar("DTC_PPFLAGS") or "").split() | ||
| 107 | for i in includes: | ||
| 108 | ppargs.append("-I{0}".format(i)) | ||
| 109 | ppargs += ["-o", "{0}.pp".format(dts), dtspath] | ||
| 110 | bb.note("Running {0}".format(" ".join(ppargs))) | ||
| 111 | try: | ||
| 112 | subprocess.run(ppargs, check=True, capture_output=True) | ||
| 113 | except subprocess.CalledProcessError as e: | ||
| 114 | bb.fatal(f"Command '{' '.join(ppargs)}' failed with return code {e.returncode}\nstdout: {e.stdout.decode()}\nstderr: {e.stderr.decode()}\ndtspath: {os.path.abspath(dtspath)}") | ||
| 115 | |||
| 116 | |||
| 117 | # determine if the file is an overlay or not (using the preprocessed file) | ||
| 118 | isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts)) | ||
| 119 | |||
| 120 | # compile | ||
| 121 | dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split() | ||
| 122 | if isoverlay: | ||
| 123 | dtcargs += (d.getVar("DTC_OFLAGS") or "").split() | ||
| 124 | else: | ||
| 125 | dtcargs += (d.getVar("DTC_BFLAGS") or "").split() | ||
| 126 | for i in includes: | ||
| 127 | dtcargs += ["-i", i] | ||
| 128 | dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")] | ||
| 129 | dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)] | ||
| 130 | bb.note("Running {0}".format(" ".join(dtcargs))) | ||
| 131 | try: | ||
| 132 | subprocess.run(dtcargs, check=True, capture_output=True) | ||
| 133 | except subprocess.CalledProcessError as e: | ||
| 134 | bb.fatal(f"Command '{' '.join(dtcargs)}' failed with return code {e.returncode}\nstdout: {e.stdout.decode()}\nstderr: {e.stderr.decode()}\ndtname: {dtname}") | ||
| 135 | |||
| 136 | |||
| 137 | python devicetree_do_compile() { | ||
| 138 | import re | ||
| 139 | includes = expand_includes("DT_INCLUDE", d) | ||
| 140 | dtfiles = d.getVar("DT_FILES").split() | ||
| 141 | dtfiles = [ re.sub(r"\.dtbo?$", ".dts", dtfile) for dtfile in dtfiles ] | ||
| 142 | listpath = d.getVar("DT_FILES_PATH") | ||
| 143 | for dts in dtfiles or os.listdir(listpath): | ||
| 144 | dtspath = os.path.join(listpath, dts) | ||
| 145 | try: | ||
| 146 | if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)): | ||
| 147 | continue # skip non-.dts files and non-overlay files | ||
| 148 | except: | ||
| 149 | continue # skip if can't determine if overlay | ||
| 150 | devicetree_compile(dtspath, includes, d) | ||
| 151 | } | ||
| 152 | |||
| 153 | devicetree_do_install() { | ||
| 154 | for dtb_file in *.dtb *.dtbo; do | ||
| 155 | [ -e "$dtb_file" ] || continue | ||
| 156 | install -Dm 0644 "${B}/$dtb_file" "${D}/boot/devicetree/$dtb_file" | ||
| 157 | done | ||
| 158 | } | ||
| 159 | |||
| 160 | devicetree_do_deploy() { | ||
| 161 | for dtb_file in *.dtb *.dtbo; do | ||
| 162 | [ -e "$dtb_file" ] || continue | ||
| 163 | install -Dm 0644 "${B}/$dtb_file" "${DEPLOYDIR}/devicetree/$dtb_file" | ||
| 164 | done | ||
| 165 | } | ||
| 166 | addtask deploy before do_build after do_install | ||
| 167 | |||
| 168 | EXPORT_FUNCTIONS do_compile do_install do_deploy | ||
| 169 | |||
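A minimal sketch of a recipe using this class to build an out-of-tree device tree (machine and file names are hypothetical):

    SUMMARY = "Device tree for myboard"
    COMPATIBLE_MACHINE = "myboard"

    inherit devicetree

    # picked up from ${S} (the default DT_FILES_PATH)
    SRC_URI = "file://myboard.dts"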
diff --git a/meta/classes-recipe/devupstream.bbclass b/meta/classes-recipe/devupstream.bbclass deleted file mode 100644 index 60026a527f..0000000000 --- a/meta/classes-recipe/devupstream.bbclass +++ /dev/null | |||
| @@ -1,55 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Class for use in BBCLASSEXTEND to make it easier to have a single recipe that | ||
| 8 | # can build both stable tarballs and snapshots from upstream source | ||
| 9 | # repositories. | ||
| 10 | # | ||
| 11 | # Usage: | ||
| 12 | # BBCLASSEXTEND = "devupstream:target" | ||
| 13 | # SRC_URI:class-devupstream = "git://git.example.com/example;branch=master" | ||
| 14 | # SRCREV:class-devupstream = "abcdef" | ||
| 15 | # | ||
| 16 | # There are a few caveats that remain to be solved: | ||
| 17 | # - You can't build native or nativesdk recipes using for example | ||
| 18 | # devupstream:native; you can only build target recipes. | ||
| 19 | # - If the fetcher requires native tools (such as subversion-native) then | ||
| 20 | # bitbake won't be able to add them automatically. | ||
| 21 | |||
| 22 | python devupstream_virtclass_handler () { | ||
| 23 | # Do nothing if this is inherited, as it's for BBCLASSEXTEND | ||
| 24 | if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""): | ||
| 25 | bb.error("Don't inherit devupstream, use BBCLASSEXTEND") | ||
| 26 | return | ||
| 27 | |||
| 28 | variant = d.getVar("BBEXTENDVARIANT") | ||
| 29 | if variant not in ("target", "native"): | ||
| 30 | bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant) | ||
| 31 | return | ||
| 32 | |||
| 33 | # Development releases are never preferred by default | ||
| 34 | d.setVar("DEFAULT_PREFERENCE", "-1") | ||
| 35 | |||
| 36 | src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI") | ||
| 37 | uri = bb.fetch2.URI(src_uri.split()[0]) | ||
| 38 | |||
| 39 | # Modify the PV if the recipe hasn't already overridden it | ||
| 40 | pv = d.getVar("PV") | ||
| 41 | proto_marker = "+" + uri.scheme | ||
| 42 | if proto_marker not in pv and not d.getVar("PV:class-devupstream"): | ||
| 43 | d.setVar("PV", pv + proto_marker) | ||
| 44 | |||
| 45 | if variant == "native": | ||
| 46 | pn = d.getVar("PN") | ||
| 47 | d.setVar("PN", "%s-native" % (pn)) | ||
| 48 | fn = d.getVar("FILE") | ||
| 49 | bb.parse.BBHandler.inherit("native", fn, 0, d) | ||
| 50 | |||
| 51 | d.appendVar("CLASSOVERRIDE", ":class-devupstream") | ||
| 52 | } | ||
| 53 | |||
| 54 | addhandler devupstream_virtclass_handler | ||
| 55 | devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise" | ||
diff --git a/meta/classes-recipe/distro_features_check.bbclass b/meta/classes-recipe/distro_features_check.bbclass deleted file mode 100644 index 1f2674fd6e..0000000000 --- a/meta/classes-recipe/distro_features_check.bbclass +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Temporarily provide fallback to the old name of the class | ||
| 8 | |||
| 9 | python __anonymous() { | ||
| 10 | bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead") | ||
| 11 | } | ||
| 12 | |||
| 13 | inherit features_check | ||
diff --git a/meta/classes-recipe/dos2unix.bbclass b/meta/classes-recipe/dos2unix.bbclass deleted file mode 100644 index 18e89b1cf2..0000000000 --- a/meta/classes-recipe/dos2unix.bbclass +++ /dev/null | |||
| @@ -1,20 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Class used to convert all CRLF line terminators to LF. Some | ||
| 8 | # projects are developed/maintained on Windows and therefore use | ||
| 9 | # different line terminators (CRLF) than on Linux (LF), which can | ||
| 10 | # cause annoying patching errors during git push/checkout | ||
| 11 | # processes. | ||
| 12 | |||
| 13 | do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot" | ||
| 14 | |||
| 15 | # Convert CRLF line terminators to LF | ||
| 16 | do_convert_crlf_to_lf () { | ||
| 17 | find ${S} -type f -exec dos2unix {} \; | ||
| 18 | } | ||
| 19 | |||
| 20 | addtask convert_crlf_to_lf after do_unpack before do_patch | ||
diff --git a/meta/classes-recipe/features_check.bbclass b/meta/classes-recipe/features_check.bbclass deleted file mode 100644 index 1e0eaa4eed..0000000000 --- a/meta/classes-recipe/features_check.bbclass +++ /dev/null | |||
| @@ -1,57 +0,0 @@ | |||
| 1 | # Allow checking of required and conflicting features | ||
| 2 | # | ||
| 3 | # xxx = [DISTRO,MACHINE,COMBINED,IMAGE] | ||
| 4 | # | ||
| 5 | # ANY_OF_xxx_FEATURES: ensure at least one item on this list is included | ||
| 6 | # in xxx_FEATURES. | ||
| 7 | # REQUIRED_xxx_FEATURES: ensure every item on this list is included | ||
| 8 | # in xxx_FEATURES. | ||
| 9 | # CONFLICT_xxx_FEATURES: ensure no item in this list is included in | ||
| 10 | # xxx_FEATURES. | ||
| 11 | # | ||
| 12 | # Copyright 2019 (C) Texas Instruments Inc. | ||
| 13 | # Copyright 2013 (C) O.S. Systems Software LTDA. | ||
| 14 | # | ||
| 15 | # SPDX-License-Identifier: MIT | ||
| 16 | |||
| 17 | |||
| 18 | python () { | ||
| 19 | if bb.utils.to_boolean(d.getVar('PARSE_ALL_RECIPES', False)): | ||
| 20 | return | ||
| 21 | |||
| 22 | unused = True | ||
| 23 | |||
| 24 | for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE', 'TUNE']: | ||
| 25 | if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \ | ||
| 26 | d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \ | ||
| 27 | d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'): | ||
| 28 | continue | ||
| 29 | |||
| 30 | unused = False | ||
| 31 | |||
| 32 | # Assume at least one var is set. | ||
| 33 | features = set((d.getVar(kind + '_FEATURES') or '').split()) | ||
| 34 | |||
| 35 | any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split()) | ||
| 36 | if any_of_features: | ||
| 37 | if set.isdisjoint(any_of_features, features): | ||
| 38 | raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES" | ||
| 39 | % (' '.join(any_of_features), kind)) | ||
| 40 | |||
| 41 | required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split()) | ||
| 42 | if required_features: | ||
| 43 | missing = set.difference(required_features, features) | ||
| 44 | if missing: | ||
| 45 | raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)" | ||
| 46 | % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind)) | ||
| 47 | |||
| 48 | conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split()) | ||
| 49 | if conflict_features: | ||
| 50 | conflicts = set.intersection(conflict_features, features) | ||
| 51 | if conflicts: | ||
| 52 | raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)" | ||
| 53 | % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind)) | ||
| 54 | |||
| 55 | if unused: | ||
| 56 | bb.warn("Recipe inherits features_check but doesn't use it") | ||
| 57 | } | ||
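A minimal sketch of the three variable families in use (the feature names are chosen for illustration):

    inherit features_check

    REQUIRED_DISTRO_FEATURES = "x11 opengl"
    CONFLICT_DISTRO_FEATURES = "wayland"
    ANY_OF_MACHINE_FEATURES = "rtc wifi"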
diff --git a/meta/classes-recipe/fontcache.bbclass b/meta/classes-recipe/fontcache.bbclass deleted file mode 100644 index deadcd2fbb..0000000000 --- a/meta/classes-recipe/fontcache.bbclass +++ /dev/null | |||
| @@ -1,63 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This class will generate the proper postinst/postrm scriptlets for font | ||
| 9 | # packages. | ||
| 10 | # | ||
| 11 | |||
| 12 | PACKAGE_WRITE_DEPS += "qemuwrapper-cross" | ||
| 13 | |||
| 14 | FONT_PACKAGES ??= "${PN}" | ||
| 15 | FONT_PACKAGES:class-native = "" | ||
| 16 | FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils" | ||
| 17 | FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig" | ||
| 18 | FONTCONFIG_CACHE_PARAMS ?= "-v" | ||
| 19 | # You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues. | ||
| 20 | # Something has to be set, because qemuwrapper uses this variable after -E. | ||
| 21 | # Multiple variables aren't allowed, because for qemu they are separated | ||
| 22 | # by commas, while in the -n "$D" case they should be separated by spaces. | ||
| 23 | FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1" | ||
| 24 | fontcache_common() { | ||
| 25 | if [ -n "$D" ] ; then | ||
| 26 | $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \ | ||
| 27 | 'bindir="${bindir}"' \ | ||
| 28 | 'libdir="${libdir}"' \ | ||
| 29 | 'libexecdir="${libexecdir}"' \ | ||
| 30 | 'base_libdir="${base_libdir}"' \ | ||
| 31 | 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \ | ||
| 32 | 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \ | ||
| 33 | 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"' | ||
| 34 | else | ||
| 35 | ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS} | ||
| 36 | fi | ||
| 37 | } | ||
| 38 | |||
| 39 | python () { | ||
| 40 | font_pkgs = d.getVar('FONT_PACKAGES').split() | ||
| 41 | deps = d.getVar("FONT_EXTRA_RDEPENDS") | ||
| 42 | |||
| 43 | for pkg in font_pkgs: | ||
| 44 | if deps: d.appendVar('RDEPENDS:' + pkg, ' '+deps) | ||
| 45 | } | ||
| 46 | |||
| 47 | python add_fontcache_postinsts() { | ||
| 48 | for pkg in d.getVar('FONT_PACKAGES').split(): | ||
| 49 | bb.note("adding fonts postinst and postrm scripts to %s" % pkg) | ||
| 50 | postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst') | ||
| 51 | if not postinst: | ||
| 52 | postinst = '#!/bin/sh\n' | ||
| 53 | postinst += d.getVar('fontcache_common') | ||
| 54 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 55 | |||
| 56 | postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm') | ||
| 57 | if not postrm: | ||
| 58 | postrm = '#!/bin/sh\n' | ||
| 59 | postrm += d.getVar('fontcache_common') | ||
| 60 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 61 | } | ||
| 62 | |||
| 63 | PACKAGEFUNCS =+ "add_fontcache_postinsts" | ||
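A minimal sketch of a font recipe with split packages; the default FONT_PACKAGES of ${PN} already covers the single-package case (the package names are hypothetical):

    inherit fontcache

    PACKAGES = "${PN}-serif ${PN}-sans"
    FONT_PACKAGES = "${PN}-serif ${PN}-sans"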
diff --git a/meta/classes-recipe/fs-uuid.bbclass b/meta/classes-recipe/fs-uuid.bbclass deleted file mode 100644 index e215f06c80..0000000000 --- a/meta/classes-recipe/fs-uuid.bbclass +++ /dev/null | |||
| @@ -1,30 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Extract UUID from ${ROOTFS}, which must have been built | ||
| 8 | # by the time that this function gets called. Only works | ||
| 9 | # on ext file systems and depends on tune2fs. | ||
| 10 | def get_rootfs_uuid(d): | ||
| 11 | import subprocess | ||
| 12 | rootfs = d.getVar('ROOTFS') | ||
| 13 | output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True) | ||
| 14 | for line in output.split('\n'): | ||
| 15 | if line.startswith('Filesystem UUID:'): | ||
| 16 | uuid = line.split()[-1] | ||
| 17 | bb.note('UUID of %s: %s' % (rootfs, uuid)) | ||
| 18 | return uuid | ||
| 19 | bb.fatal('Could not determine filesystem UUID of %s' % rootfs) | ||
| 20 | |||
| 21 | # Replace the special <<uuid-of-rootfs>> inside a string (like the | ||
| 22 | # root= APPEND string in a syslinux.cfg or systemd-boot entry) with the | ||
| 23 | # actual UUID of the rootfs. Does nothing if the special string | ||
| 24 | # is not used. | ||
| 25 | def replace_rootfs_uuid(d, string): | ||
| 26 | UUID_PLACEHOLDER = '<<uuid-of-rootfs>>' | ||
| 27 | if UUID_PLACEHOLDER in string: | ||
| 28 | uuid = get_rootfs_uuid(d) | ||
| 29 | string = string.replace(UUID_PLACEHOLDER, uuid) | ||
| 30 | return string | ||
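A sketch of the placeholder in use from a syslinux-based image configuration; pairing it with SYSLINUX_ROOT reflects how syslinux.bbclass consumed replace_rootfs_uuid, but treat the exact wiring as an assumption:

    inherit fs-uuid

    # substituted with the real UUID once the rootfs has been built
    SYSLINUX_ROOT = "root=<<uuid-of-rootfs>>"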
diff --git a/meta/classes-recipe/gconf.bbclass b/meta/classes-recipe/gconf.bbclass deleted file mode 100644 index b81851bc78..0000000000 --- a/meta/classes-recipe/gconf.bbclass +++ /dev/null | |||
| @@ -1,77 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | DEPENDS += "gconf" | ||
| 8 | PACKAGE_WRITE_DEPS += "gconf-native" | ||
| 9 | |||
| 10 | # These are for when gconftool is used natively and the prefix isn't necessarily | ||
| 11 | # the sysroot. TODO: replicate the postinst logic for -native packages going | ||
| 12 | # into sysroot as they won't be running their own install-time schema | ||
| 13 | # registration (disabled below) nor the postinst script (as they don't happen). | ||
| 14 | export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults" | ||
| 15 | export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2" | ||
| 16 | |||
| 17 | # Disable install-time schema registration as we're a packaging system so this | ||
| 18 | # happens in the postinst script, not at install time. Set both the configure | ||
| 19 | # script option and the traditional environment variable just to make sure. | ||
| 20 | EXTRA_OECONF += "--disable-schemas-install" | ||
| 21 | export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1" | ||
| 22 | |||
| 23 | gconf_postinst() { | ||
| 24 | if [ "x$D" != "x" ]; then | ||
| 25 | export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults" | ||
| 26 | else | ||
| 27 | export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` | ||
| 28 | fi | ||
| 29 | |||
| 30 | SCHEMA_LOCATION=$D/etc/gconf/schemas | ||
| 31 | for SCHEMA in ${SCHEMA_FILES}; do | ||
| 32 | if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then | ||
| 33 | HOME=$D/root gconftool-2 \ | ||
| 34 | --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null | ||
| 35 | fi | ||
| 36 | done | ||
| 37 | } | ||
| 38 | |||
| 39 | gconf_prerm() { | ||
| 40 | SCHEMA_LOCATION=/etc/gconf/schemas | ||
| 41 | for SCHEMA in ${SCHEMA_FILES}; do | ||
| 42 | if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then | ||
| 43 | HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \ | ||
| 44 | gconftool-2 \ | ||
| 45 | --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null | ||
| 46 | fi | ||
| 47 | done | ||
| 48 | } | ||
| 49 | |||
| 50 | python populate_packages:append () { | ||
| 51 | import re | ||
| 52 | packages = d.getVar('PACKAGES').split() | ||
| 53 | pkgdest = d.getVar('PKGDEST') | ||
| 54 | |||
| 55 | for pkg in packages: | ||
| 56 | schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg) | ||
| 57 | schemas = [] | ||
| 58 | schema_re = re.compile(r".*\.schemas$") | ||
| 59 | if os.path.exists(schema_dir): | ||
| 60 | for f in os.listdir(schema_dir): | ||
| 61 | if schema_re.match(f): | ||
| 62 | schemas.append(f) | ||
| 63 | if schemas != []: | ||
| 64 | bb.note("adding gconf postinst and prerm scripts to %s" % pkg) | ||
| 65 | d.setVar('SCHEMA_FILES', " ".join(schemas)) | ||
| 66 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 67 | if not postinst: | ||
| 68 | postinst = '#!/bin/sh\n' | ||
| 69 | postinst += d.getVar('gconf_postinst') | ||
| 70 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 71 | prerm = d.getVar('pkg_prerm:%s' % pkg) | ||
| 72 | if not prerm: | ||
| 73 | prerm = '#!/bin/sh\n' | ||
| 74 | prerm += d.getVar('gconf_prerm') | ||
| 75 | d.setVar('pkg_prerm:%s' % pkg, prerm) | ||
| 76 | d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf') | ||
| 77 | } | ||
diff --git a/meta/classes-recipe/gettext.bbclass b/meta/classes-recipe/gettext.bbclass deleted file mode 100644 index c313885d52..0000000000 --- a/meta/classes-recipe/gettext.bbclass +++ /dev/null | |||
| @@ -1,28 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | def gettext_dependencies(d): | ||
| 8 | if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'): | ||
| 9 | return "" | ||
| 10 | if d.getVar('USE_NLS') == 'no': | ||
| 11 | return "gettext-minimal-native" | ||
| 12 | return "gettext-native" | ||
| 13 | |||
| 14 | def gettext_oeconf(d): | ||
| 15 | if d.getVar('USE_NLS') == 'no': | ||
| 16 | return '--disable-nls' | ||
| 17 | # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set | ||
| 18 | if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'): | ||
| 19 | return '--disable-nls' | ||
| 20 | return "--enable-nls" | ||
| 21 | |||
| 22 | BASEDEPENDS:append = " ${@gettext_dependencies(d)}" | ||
| 23 | EXTRA_OECONF:append = " ${@gettext_oeconf(d)}" | ||
| 24 | |||
| 25 | # Without this, msgfmt from gettext-native will not find ITS files | ||
| 26 | # provided by target recipes (for example, polkit.its). | ||
| 27 | GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext" | ||
| 28 | export GETTEXTDATADIRS | ||
diff --git a/meta/classes-recipe/gi-docgen.bbclass b/meta/classes-recipe/gi-docgen.bbclass deleted file mode 100644 index b178d1c387..0000000000 --- a/meta/classes-recipe/gi-docgen.bbclass +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # gi-docgen is a new gnome documentation generator, which | ||
| 8 | # seems to be a successor to gtk-doc: | ||
| 9 | # https://gitlab.gnome.org/GNOME/gi-docgen | ||
| 10 | |||
| 11 | # True if api-documentation and gobject-introspection-data are in DISTRO_FEATURES, | ||
| 12 | # and qemu-usermode is in MACHINE_FEATURES, False otherwise. | ||
| 13 | GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation gobject-introspection-data', \ | ||
| 14 | bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}" | ||
| 15 | |||
| 16 | # When building native recipes, disable gi-docgen, as it is not necessary, | ||
| 17 | # pulls in additional dependencies, and makes build times longer | ||
| 18 | GIDOCGEN_ENABLED:class-native = "False" | ||
| 19 | GIDOCGEN_ENABLED:class-nativesdk = "False" | ||
| 20 | |||
| 21 | # meson: default option name to enable/disable gi-docgen. This matches most | ||
| 22 | # projects' configuration. If in doubt, check meson_options.txt in the | ||
| 23 | # project's source tree. | ||
| 24 | GIDOCGEN_MESON_OPTION ?= 'gtk_doc' | ||
| 25 | GIDOCGEN_MESON_ENABLE_FLAG ?= 'true' | ||
| 26 | GIDOCGEN_MESON_DISABLE_FLAG ?= 'false' | ||
| 27 | |||
| 28 | # Auto enable/disable based on GIDOCGEN_ENABLED | ||
| 29 | EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} " | ||
| 30 | |||
| 31 | DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}" | ||
| 32 | |||
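Where a project names its meson documentation option differently, a recipe only needs to override the option name; a sketch (the option name 'documentation' is hypothetical):

    inherit gi-docgen

    # this project's meson_options.txt uses 'documentation' instead of 'gtk_doc'
    GIDOCGEN_MESON_OPTION = "documentation"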
diff --git a/meta/classes-recipe/gio-module-cache.bbclass b/meta/classes-recipe/gio-module-cache.bbclass deleted file mode 100644 index 3714678c7c..0000000000 --- a/meta/classes-recipe/gio-module-cache.bbclass +++ /dev/null | |||
| @@ -1,43 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | PACKAGE_WRITE_DEPS += "qemuwrapper-cross" | ||
| 8 | |||
| 9 | GIO_MODULE_PACKAGES ??= "${PN}" | ||
| 10 | |||
| 11 | gio_module_cache_common() { | ||
| 12 | if [ "x$D" != "x" ]; then | ||
| 13 | $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \ | ||
| 14 | mlprefix=${MLPREFIX} \ | ||
| 15 | binprefix=${MLPREFIX} \ | ||
| 16 | libdir=${libdir} \ | ||
| 17 | libexecdir=${libexecdir} \ | ||
| 18 | base_libdir=${base_libdir} \ | ||
| 19 | bindir=${bindir} | ||
| 20 | else | ||
| 21 | ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/ | ||
| 22 | fi | ||
| 23 | } | ||
| 24 | |||
| 25 | python populate_packages:append () { | ||
| 26 | packages = d.getVar('GIO_MODULE_PACKAGES').split() | ||
| 27 | |||
| 28 | for pkg in packages: | ||
| 29 | bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg) | ||
| 30 | |||
| 31 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 32 | if not postinst: | ||
| 33 | postinst = '#!/bin/sh\n' | ||
| 34 | postinst += d.getVar('gio_module_cache_common') | ||
| 35 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 36 | |||
| 37 | postrm = d.getVar('pkg_postrm:%s' % pkg) | ||
| 38 | if not postrm: | ||
| 39 | postrm = '#!/bin/sh\n' | ||
| 40 | postrm += d.getVar('gio_module_cache_common') | ||
| 41 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 42 | } | ||
| 43 | |||
diff --git a/meta/classes-recipe/github-releases.bbclass b/meta/classes-recipe/github-releases.bbclass deleted file mode 100644 index ed83b83731..0000000000 --- a/meta/classes-recipe/github-releases.bbclass +++ /dev/null | |||
| @@ -1,3 +0,0 @@ | |||
| 1 | GITHUB_BASE_URI ?= "https://github.com/${BPN}/${BPN}/releases/" | ||
| 2 | UPSTREAM_CHECK_URI ?= "${GITHUB_BASE_URI}" | ||
| 3 | UPSTREAM_CHECK_REGEX ?= "releases/tag/v?(?P<pver>\d+(\.\d+)+)" | ||
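A sketch of a consumer fetching a release tarball while getting upstream version checks for free (project layout hypothetical; checksums elided):

    inherit github-releases

    SRC_URI = "${GITHUB_BASE_URI}/download/v${PV}/${BPN}-${PV}.tar.gz"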
diff --git a/meta/classes-recipe/gnomebase.bbclass b/meta/classes-recipe/gnomebase.bbclass deleted file mode 100644 index 74073321b8..0000000000 --- a/meta/classes-recipe/gnomebase.bbclass +++ /dev/null | |||
| @@ -1,38 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | def gnome_verdir(v): | ||
| 8 | return ".".join(v.split(".")[:-1]) or v | ||
| 9 | |||
| 10 | |||
| 11 | GNOME_COMPRESS_TYPE ?= "xz" | ||
| 12 | SECTION ?= "x11/gnome" | ||
| 13 | GNOMEBN ?= "${BPN}" | ||
| 14 | SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive" | ||
| 15 | |||
| 16 | FILES:${PN} += "${datadir}/application-registry \ | ||
| 17 | ${datadir}/mime-info \ | ||
| 18 | ${datadir}/mime/packages \ | ||
| 19 | ${datadir}/mime/application \ | ||
| 20 | ${datadir}/gnome-2.0 \ | ||
| 21 | ${datadir}/polkit* \ | ||
| 22 | ${datadir}/GConf \ | ||
| 23 | ${datadir}/glib-2.0/schemas \ | ||
| 24 | ${datadir}/appdata \ | ||
| 25 | ${datadir}/icons \ | ||
| 26 | " | ||
| 27 | |||
| 28 | FILES:${PN}-doc += "${datadir}/devhelp" | ||
| 29 | |||
| 30 | GNOMEBASEBUILDCLASS ??= "meson" | ||
| 31 | inherit pkgconfig | ||
| 32 | inherit_defer ${GNOMEBASEBUILDCLASS} | ||
| 33 | |||
| 34 | do_install:append() { | ||
| 35 | rm -rf ${D}${localstatedir}/lib/scrollkeeper/* | ||
| 36 | rm -rf ${D}${localstatedir}/scrollkeeper/* | ||
| 37 | rm -f ${D}${datadir}/applications/*.cache | ||
| 38 | } | ||
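A minimal sketch of a GNOME recipe; SRC_URI is derived automatically, so for a hypothetical mygnomeapp at PV = "42.1" the gnome_verdir() helper above resolves the fetch path to ${GNOME_MIRROR}/mygnomeapp/42/mygnomeapp-42.1.tar.xz:

    # mygnomeapp_42.1.bb (hypothetical)
    inherit gnomebase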
diff --git a/meta/classes-recipe/go-mod-update-modules.bbclass b/meta/classes-recipe/go-mod-update-modules.bbclass deleted file mode 100644 index 0083588a25..0000000000 --- a/meta/classes-recipe/go-mod-update-modules.bbclass +++ /dev/null | |||
| @@ -1,146 +0,0 @@ | |||
| 1 | addtask do_update_modules after do_configure | ||
| 2 | do_update_modules[nostamp] = "1" | ||
| 3 | do_update_modules[network] = "1" | ||
| 4 | |||
| 5 | # This class maintains two files, BPN-go-mods.inc and BPN-licenses.inc. | ||
| 6 | # | ||
| 7 | # -go-mods.inc will append SRC_URI with all of the Go modules that are | ||
| 8 | # dependencies of this recipe. | ||
| 9 | # | ||
| 10 | # -licenses.inc will append LICENSE and LIC_FILES_CHKSUM with the found licenses | ||
| 11 | # in the modules. | ||
| 12 | # | ||
| 13 | # These files are machine-generated and should not be modified. | ||
| 14 | |||
| 15 | python do_update_modules() { | ||
| 16 | import subprocess, tempfile, json, re, urllib.parse | ||
| 17 | from oe.license import tidy_licenses | ||
| 18 | from oe.license_finder import find_licenses_up | ||
| 19 | |||
| 20 | def unescape_path(path): | ||
| 21 | """Unescape capital letters using exclamation points.""" | ||
| 22 | return re.sub(r'!([a-z])', lambda m: m.group(1).upper(), path) | ||
| 23 | |||
| 24 | def fold_uri(uri): | ||
| 25 | """Fold URI for sorting shorter module paths before longer.""" | ||
| 26 | return uri.replace(';', ' ').replace('/', '!') | ||
| 27 | |||
| 28 | def parse_existing_licenses(): | ||
| 29 | hashes = {} | ||
| 30 | for url in d.getVar("LIC_FILES_CHKSUM").split(): | ||
| 31 | (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url) | ||
| 32 | if "spdx" in parm and parm["spdx"] != "Unknown": | ||
| 33 | hashes[parm["md5"]] = urllib.parse.unquote_plus(parm["spdx"]) | ||
| 34 | return hashes | ||
| 35 | |||
| 36 | bpn = d.getVar("BPN") | ||
| 37 | thisdir = d.getVar("THISDIR") | ||
| 38 | s_dir = d.getVar("S") | ||
| 39 | |||
| 40 | with tempfile.TemporaryDirectory(prefix='go-mod-') as mod_cache_dir: | ||
| 41 | notice = """ | ||
| 42 | # This file has been generated by go-mod-update-modules.bbclass | ||
| 43 | # | ||
| 44 | # Do not modify it by hand, as the contents will be replaced when | ||
| 45 | # running the update-modules task. | ||
| 46 | |||
| 47 | """ | ||
| 48 | |||
| 49 | env = dict(os.environ, GOMODCACHE=mod_cache_dir) | ||
| 50 | source = d.expand("${UNPACKDIR}/${GO_SRCURI_DESTSUFFIX}") | ||
| 51 | go_install = d.getVar("GO_INSTALL").split() | ||
| 52 | output = subprocess.check_output(("go", "list", "-json=Dir,Module", "-deps", *go_install), | ||
| 53 | cwd=source, env=env, text=True) | ||
| 54 | |||
| 55 | # | ||
| 56 | # Licenses | ||
| 57 | # | ||
| 58 | |||
| 59 | # load hashes from the existing licenses.inc | ||
| 60 | extra_hashes = parse_existing_licenses() | ||
| 61 | |||
| 62 | # The output of this isn't actually valid JSON, but a series of dicts. | ||
| 63 | # Wrap in [] and join the dicts with , | ||
| 64 | # Very frustrating that the json parser in python can't repeatedly | ||
| 65 | # parse from a stream. | ||
| 66 | pkgs = json.loads('[' + output.replace('}\n{', '},\n{') + ']') | ||
| 67 | |||
| 68 | # Collect licenses for the dependencies. | ||
| 69 | lic_files = {} | ||
| 70 | for pkg in pkgs: | ||
| 71 | pkg_dir = pkg['Dir'] | ||
| 72 | if not pkg_dir.startswith(mod_cache_dir): | ||
| 73 | continue | ||
| 74 | |||
| 75 | mod_dir = pkg['Module']['Dir'] | ||
| 76 | path = os.path.relpath(mod_dir, mod_cache_dir) | ||
| 77 | |||
| 78 | for name, file, md5 in find_licenses_up(pkg_dir, mod_dir, d, first_only=True, extra_hashes=extra_hashes): | ||
| 79 | lic_files[os.path.join(path, file)] = (name, md5) | ||
| 80 | |||
| 81 | licenses = set() | ||
| 82 | lic_files_chksum = [] | ||
| 83 | for lic_file in lic_files: | ||
| 84 | license_name, license_md5 = lic_files[lic_file] | ||
| 85 | if license_name == "Unknown": | ||
| 86 | bb.warn(f"Unknown license: {lic_file} {license_md5}") | ||
| 87 | |||
| 88 | licenses.add(lic_files[lic_file][0]) | ||
| 89 | lic_files_chksum.append( | ||
| 90 | f'file://pkg/mod/{lic_file};md5={license_md5};spdx={urllib.parse.quote_plus(license_name)}') | ||
| 91 | |||
| 92 | licenses_filename = os.path.join(thisdir, f"{bpn}-licenses.inc") | ||
| 93 | with open(licenses_filename, "w") as f: | ||
| 94 | f.write(notice) | ||
| 95 | f.write(f'LICENSE += "& {" & ".join(tidy_licenses(licenses))}"\n\n') | ||
| 96 | f.write('LIC_FILES_CHKSUM += "\\\n') | ||
| 97 | for lic in sorted(lic_files_chksum, key=fold_uri): | ||
| 98 | f.write(' ' + lic + ' \\\n') | ||
| 99 | f.write('"\n') | ||
| 100 | |||
| 101 | # | ||
| 102 | # Sources | ||
| 103 | # | ||
| 104 | |||
| 105 | # Collect the module cache files downloaded by the go list command, as | ||
| 106 | # go list knows best what it needs, and it needs more files in the | ||
| 107 | # module cache than the go install command, since it doesn't do the | ||
| 108 | # dependency pruning mentioned in the Go module reference, | ||
| 109 | # https://go.dev/ref/mod, for go 1.17 or higher. | ||
| 110 | src_uris = [] | ||
| 111 | downloaddir = os.path.join(mod_cache_dir, 'cache', 'download') | ||
| 112 | for dirpath, _, filenames in os.walk(downloaddir): | ||
| 113 | # We want to process files under @v directories | ||
| 114 | path, base = os.path.split(os.path.relpath(dirpath, downloaddir)) | ||
| 115 | if base != '@v': | ||
| 116 | continue | ||
| 117 | |||
| 118 | path = unescape_path(path) | ||
| 119 | zipver = None | ||
| 120 | for name in filenames: | ||
| 121 | ver, ext = os.path.splitext(name) | ||
| 122 | if ext == '.zip': | ||
| 123 | chksum = bb.utils.sha256_file(os.path.join(dirpath, name)) | ||
| 124 | src_uris.append(f'gomod://{path};version={ver};sha256sum={chksum}') | ||
| 125 | zipver = ver | ||
| 126 | break | ||
| 127 | for name in filenames: | ||
| 128 | ver, ext = os.path.splitext(name) | ||
| 129 | if ext == '.mod' and ver != zipver: | ||
| 130 | chksum = bb.utils.sha256_file(os.path.join(dirpath, name)) | ||
| 131 | src_uris.append(f'gomod://{path};version={ver};mod=1;sha256sum={chksum}') | ||
| 132 | |||
| 133 | |||
| 134 | go_mods_filename = os.path.join(thisdir, f"{bpn}-go-mods.inc") | ||
| 135 | with open(go_mods_filename, "w") as f: | ||
| 136 | f.write(notice) | ||
| 137 | f.write('SRC_URI += "\\\n') | ||
| 138 | for uri in sorted(src_uris, key=fold_uri): | ||
| 139 | f.write(' ' + uri + ' \\\n') | ||
| 140 | f.write('"\n') | ||
| 141 | |||
| 142 | subprocess.check_output(("go", "clean", "-modcache"), cwd=source, env=env, text=True) | ||
| 143 | } | ||
| 144 | |||
| 145 | # This doesn't work as we need to wipe the inc files first so we don't try looking for LICENSE files that don't yet exist | ||
| 146 | # RECIPE_UPGRADE_EXTRA_TASKS += "do_update_modules" | ||
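
For reference, the ${BPN}-go-mods.inc file written by the task above takes roughly the following shape; the module paths, versions and checksums here are purely illustrative placeholders, not real data:

    SRC_URI += "\
        gomod://github.com/example/dep;version=v1.2.3;sha256sum=<sha256> \
        gomod://golang.org/x/sys;version=v0.1.0;mod=1;sha256sum=<sha256> \
    "
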
diff --git a/meta/classes-recipe/go-mod.bbclass b/meta/classes-recipe/go-mod.bbclass deleted file mode 100644 index a15dda8f0e..0000000000 --- a/meta/classes-recipe/go-mod.bbclass +++ /dev/null | |||
| @@ -1,34 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Handle Go Modules support | ||
| 8 | # | ||
| 9 | # When using Go Modules, the current working directory MUST be at or below | ||
| 10 | # the location of the 'go.mod' file when the go tool is used, and there is no | ||
| 11 | # way to tell it to look elsewhere. It will automatically look upwards for the | ||
| 12 | # file, but not downwards. | ||
| 13 | # | ||
| 14 | # To support this use case, we provide the `GO_WORKDIR` variable, which defaults | ||
| 15 | # to `GO_IMPORT` but allows for easy override. | ||
| 16 | # | ||
| 17 | # Copyright 2020 (C) O.S. Systems Software LTDA. | ||
| 18 | |||
| 19 | # The '-modcacherw' option ensures we have write access to the cached objects so | ||
| 20 | # we avoid errors during the clean task as well as when removing the TMPDIR. | ||
| 21 | GOBUILDFLAGS:append = " -modcacherw" | ||
| 22 | |||
| 23 | inherit go | ||
| 24 | |||
| 25 | export GOMODCACHE = "${S}/pkg/mod" | ||
| 26 | GO_MOD_CACHE_DIR = "${@os.path.relpath(d.getVar('GOMODCACHE'), d.getVar('UNPACKDIR'))}" | ||
| 27 | do_unpack[cleandirs] += "${GOMODCACHE}" | ||
| 28 | |||
| 29 | GO_WORKDIR ?= "${GO_IMPORT}" | ||
| 30 | do_compile[dirs] += "${B}/src/${GO_WORKDIR}" | ||
| 31 | |||
| 32 | # Make go install unpack the module zip files in the module cache directory | ||
| 33 | # before the license directory is populated with license files. | ||
| 34 | addtask do_compile before do_populate_lic | ||
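
A minimal consumer of go-mod.bbclass might look like the sketch below; the recipe, GO_IMPORT and git URL are hypothetical, and the gomod:// dependency entries would normally come from a generated .inc file as shown earlier:

    # hypothetical recipe: hello-go_1.0.bb
    inherit go-mod

    GO_IMPORT = "github.com/example/hello"
    SRC_URI = "git://${GO_IMPORT};protocol=https;branch=main;destsuffix=${GO_SRCURI_DESTSUFFIX}"
    require ${BPN}-go-mods.inc
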
diff --git a/meta/classes-recipe/go-ptest.bbclass b/meta/classes-recipe/go-ptest.bbclass deleted file mode 100644 index 54fcbb535d..0000000000 --- a/meta/classes-recipe/go-ptest.bbclass +++ /dev/null | |||
| @@ -1,60 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit go ptest | ||
| 8 | |||
| 9 | do_compile_ptest_base() { | ||
| 10 | export TMPDIR="${GOTMPDIR}" | ||
| 11 | rm -f ${B}/.go_compiled_tests.list | ||
| 12 | go_list_package_tests | while read pkg; do | ||
| 13 | cd ${B}/src/$pkg | ||
| 14 | ${GO} test ${GOPTESTBUILDFLAGS} $pkg | ||
| 15 | find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \ | ||
| 16 | sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list | ||
| 17 | done | ||
| 18 | do_compile_ptest | ||
| 19 | } | ||
| 20 | |||
| 21 | do_compile_ptest_base[dirs] =+ "${GOTMPDIR}" | ||
| 22 | |||
| 23 | go_make_ptest_wrapper() { | ||
| 24 | cat >${D}${PTEST_PATH}/run-ptest <<EOF | ||
| 25 | #!/bin/sh | ||
| 26 | RC=0 | ||
| 27 | run_test() ( | ||
| 28 | cd "\$1" | ||
| 29 | ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1 | ||
| 30 | exit \$?) | ||
| 31 | EOF | ||
| 32 | |||
| 33 | } | ||
| 34 | |||
| 35 | do_install_ptest_base() { | ||
| 36 | test -f "${B}/.go_compiled_tests.list" || exit 0 | ||
| 37 | install -d ${D}${PTEST_PATH} | ||
| 38 | go_stage_testdata | ||
| 39 | go_make_ptest_wrapper | ||
| 40 | havetests="" | ||
| 41 | while read test; do | ||
| 42 | testdir=`dirname $test` | ||
| 43 | testprog=`basename $test` | ||
| 44 | install -d ${D}${PTEST_PATH}/$testdir | ||
| 45 | install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test | ||
| 46 | echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest | ||
| 47 | havetests="yes" | ||
| 48 | done < ${B}/.go_compiled_tests.list | ||
| 49 | if [ -n "$havetests" ]; then | ||
| 50 | echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest | ||
| 51 | chmod +x ${D}${PTEST_PATH}/run-ptest | ||
| 52 | else | ||
| 53 | rm -rf ${D}${PTEST_PATH} | ||
| 54 | fi | ||
| 55 | do_install_ptest | ||
| 56 | chown -R root:root ${D}${PTEST_PATH} | ||
| 57 | } | ||
| 58 | |||
| 59 | INSANE_SKIP:${PN}-ptest += "ldflags" | ||
| 60 | |||
diff --git a/meta/classes-recipe/go.bbclass b/meta/classes-recipe/go.bbclass deleted file mode 100644 index e0f667373e..0000000000 --- a/meta/classes-recipe/go.bbclass +++ /dev/null | |||
| @@ -1,159 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit goarch | ||
| 8 | inherit linuxloader | ||
| 9 | |||
| 10 | # If GO_IMPORT is not set in the recipe, generate an error | ||
| 11 | GO_IMPORT ??= "${@bb.fatal("The recipe needs to set GO_IMPORT for go.bbclass to work")}" | ||
| 12 | |||
| 13 | GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}" | ||
| 14 | |||
| 15 | export GODEBUG = "gocachehash=1" | ||
| 16 | |||
| 17 | GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go" | ||
| 18 | GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go" | ||
| 19 | GOROOT = "${STAGING_LIBDIR}/go" | ||
| 20 | export GOROOT | ||
| 21 | export GOCACHE = "${B}/.cache" | ||
| 22 | |||
| 23 | export GOARCH = "${TARGET_GOARCH}" | ||
| 24 | export GOOS = "${TARGET_GOOS}" | ||
| 25 | export GOHOSTARCH = "${BUILD_GOARCH}" | ||
| 26 | export GOHOSTOS = "${BUILD_GOOS}" | ||
| 27 | |||
| 28 | GOARM[export] = "0" | ||
| 29 | GOARM:arm:class-target = "${TARGET_GOARM}" | ||
| 30 | GOARM:arm:class-target[export] = "1" | ||
| 31 | |||
| 32 | GO386[export] = "0" | ||
| 33 | GO386:x86:class-target = "${TARGET_GO386}" | ||
| 34 | GO386:x86:class-target[export] = "1" | ||
| 35 | |||
| 36 | GOMIPS[export] = "0" | ||
| 37 | GOMIPS:mips:class-target = "${TARGET_GOMIPS}" | ||
| 38 | GOMIPS:mips:class-target[export] = "1" | ||
| 39 | |||
| 40 | DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime" | ||
| 41 | DEPENDS_GOLANG:class-native = "go-native" | ||
| 42 | DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go virtual/${TARGET_PREFIX}go-runtime" | ||
| 43 | |||
| 44 | DEPENDS:append = " ${DEPENDS_GOLANG}" | ||
| 45 | |||
| 46 | GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}" | ||
| 47 | GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}" | ||
| 48 | GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}" | ||
| 49 | GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}" | ||
| 50 | GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}" | ||
| 51 | GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}" | ||
| 52 | GO_LINKMODE ?= "" | ||
| 53 | GO_EXTRA_LDFLAGS ?= "" | ||
| 54 | GO_LINUXLOADER ?= "-I ${@get_linuxloader(d)}" | ||
| 55 | # Use system loader. If uninative is used, the uninative loader will be patched automatically | ||
| 56 | GO_LINUXLOADER:class-native = "" | ||
| 57 | GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_LINUXLOADER} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"' | ||
| 58 | export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath" | ||
| 59 | export GOPATH_OMIT_IN_ACTIONID ?= "1" | ||
| 60 | export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c" | ||
| 61 | export GOPTESTFLAGS ?= "" | ||
| 62 | GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} " | ||
| 63 | |||
| 64 | export GO = "${HOST_PREFIX}go" | ||
| 65 | GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}" | ||
| 66 | GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}" | ||
| 67 | export GOTOOLDIR | ||
| 68 | |||
| 69 | export CGO_ENABLED ?= "1" | ||
| 70 | export CGO_CFLAGS ?= "${CFLAGS}" | ||
| 71 | export CGO_CPPFLAGS ?= "${CPPFLAGS}" | ||
| 72 | export CGO_CXXFLAGS ?= "${CXXFLAGS}" | ||
| 73 | export CGO_LDFLAGS ?= "${LDFLAGS}" | ||
| 74 | |||
| 75 | GO_INSTALL ?= "${GO_IMPORT}/..." | ||
| 76 | GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/" | ||
| 77 | |||
| 78 | B = "${WORKDIR}/build" | ||
| 79 | export GOPATH = "${B}" | ||
| 80 | export GOENV = "off" | ||
| 81 | export GOPROXY ??= "https://proxy.golang.org,direct" | ||
| 82 | export GOTMPDIR ?= "${WORKDIR}/build-tmp" | ||
| 83 | GOTMPDIR[vardepvalue] = "" | ||
| 84 | |||
| 85 | GO_SRCURI_DESTSUFFIX = "${@os.path.join(os.path.basename(d.getVar('S')), 'src', d.getVar('GO_IMPORT')) + '/'}" | ||
| 86 | |||
| 87 | go_list_packages() { | ||
| 88 | ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \ | ||
| 89 | egrep -v '${GO_INSTALL_FILTEROUT}' | ||
| 90 | } | ||
| 91 | |||
| 92 | go_list_package_tests() { | ||
| 93 | ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \ | ||
| 94 | grep -v '\[\]$' | \ | ||
| 95 | egrep -v '${GO_INSTALL_FILTEROUT}' | \ | ||
| 96 | awk '{ print $1 }' | ||
| 97 | } | ||
| 98 | |||
| 99 | go_do_configure() { | ||
| 100 | ln -snf ${S}/src ${B}/ | ||
| 101 | } | ||
| 102 | do_configure[dirs] =+ "${GOTMPDIR}" | ||
| 103 | |||
| 104 | go_do_compile() { | ||
| 105 | export TMPDIR="${GOTMPDIR}" | ||
| 106 | if [ -n "${GO_INSTALL}" ]; then | ||
| 107 | if [ -n "${GO_LINKSHARED}" ]; then | ||
| 108 | ${GO} install ${GOBUILDFLAGS} `go_list_packages` | ||
| 109 | rm -rf ${B}/bin | ||
| 110 | fi | ||
| 111 | ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages` | ||
| 112 | fi | ||
| 113 | } | ||
| 114 | do_compile[dirs] =+ "${GOTMPDIR}" | ||
| 115 | do_compile[cleandirs] = "${B}/bin ${B}/pkg" | ||
| 116 | |||
| 117 | go_do_install() { | ||
| 118 | install -d ${D}${libdir}/go/src/${GO_IMPORT} | ||
| 119 | tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \ | ||
| 120 | tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf - | ||
| 121 | tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \ | ||
| 122 | tar -C ${D}${libdir}/go --no-same-owner -xf - | ||
| 123 | |||
| 124 | if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then | ||
| 125 | install -d ${D}${bindir} | ||
| 126 | install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/ | ||
| 127 | fi | ||
| 128 | } | ||
| 129 | |||
| 130 | go_stage_testdata() { | ||
| 131 | oldwd="$PWD" | ||
| 132 | cd ${S}/src | ||
| 133 | find ${GO_IMPORT} -depth -type d -name testdata | while read d; do | ||
| 134 | if echo "$d" | grep -q '/vendor/'; then | ||
| 135 | continue | ||
| 136 | fi | ||
| 137 | parent=`dirname $d` | ||
| 138 | install -d ${D}${PTEST_PATH}/$parent | ||
| 139 | cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/ | ||
| 140 | done | ||
| 141 | cd "$oldwd" | ||
| 142 | } | ||
| 143 | |||
| 144 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
| 145 | |||
| 146 | FILES:${PN}-dev = "${libdir}/go/src" | ||
| 147 | FILES:${PN}-staticdev = "${libdir}/go/pkg" | ||
| 148 | |||
| 149 | INSANE_SKIP:${PN} += "ldflags" | ||
| 150 | |||
| 151 | # Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips | ||
| 152 | # doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and its | ||
| 153 | # variants. | ||
| 154 | python() { | ||
| 155 | if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'): | ||
| 156 | d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel") | ||
| 157 | else: | ||
| 158 | d.appendVar('GOBUILDFLAGS', ' -buildmode=pie') | ||
| 159 | } | ||
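
As a sketch of how the pieces above fit together: a recipe only has to set GO_IMPORT (the class raises a fatal error otherwise) and can narrow GO_INSTALL to specific packages; everything below apart from those variable names is hypothetical:

    # hypothetical recipe fragment
    inherit go

    GO_IMPORT = "github.com/example/tool"
    GO_INSTALL = "${GO_IMPORT}/cmd/tool"
    SRC_URI = "git://${GO_IMPORT};protocol=https;branch=main;destsuffix=${GO_SRCURI_DESTSUFFIX}"

go_do_compile then runs 'go install' over the packages reported by go_list_packages, and go_do_install ships any binaries found in ${GO_BUILD_BINDIR}.
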
diff --git a/meta/classes-recipe/goarch.bbclass b/meta/classes-recipe/goarch.bbclass deleted file mode 100644 index 0e9ef3a6ec..0000000000 --- a/meta/classes-recipe/goarch.bbclass +++ /dev/null | |||
| @@ -1,105 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}" | ||
| 8 | BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}" | ||
| 9 | BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}" | ||
| 10 | HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}" | ||
| 11 | HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}" | ||
| 12 | HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}" | ||
| 13 | HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}" | ||
| 14 | HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}" | ||
| 15 | HOST_GOARM:class-native = "7" | ||
| 16 | HOST_GO386:class-native = "sse2" | ||
| 17 | HOST_GOMIPS:class-native = "hardfloat" | ||
| 18 | HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}" | ||
| 19 | TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}" | ||
| 20 | TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}" | ||
| 21 | TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}" | ||
| 22 | TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}" | ||
| 23 | TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}" | ||
| 24 | TARGET_GOARM:class-native = "7" | ||
| 25 | TARGET_GO386:class-native = "sse2" | ||
| 26 | TARGET_GOMIPS:class-native = "hardfloat" | ||
| 27 | TARGET_GOARM:class-crosssdk = "7" | ||
| 28 | TARGET_GO386:class-crosssdk = "sse2" | ||
| 29 | TARGET_GOMIPS:class-crosssdk = "hardfloat" | ||
| 30 | TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}" | ||
| 31 | GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}" | ||
| 32 | |||
| 33 | # Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM. | ||
| 34 | # This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM. | ||
| 35 | BASE_GOARM = '' | ||
| 36 | BASE_GOARM:armv7ve = '7' | ||
| 37 | BASE_GOARM:armv7a = '7' | ||
| 38 | BASE_GOARM:armv6 = '6' | ||
| 39 | BASE_GOARM:armv5 = '5' | ||
| 40 | |||
| 41 | # Go supports dynamic linking on a limited set of architectures. | ||
| 42 | # See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go | ||
| 43 | GO_DYNLINK = "" | ||
| 44 | GO_DYNLINK:arm ?= "1" | ||
| 45 | GO_DYNLINK:aarch64 ?= "1" | ||
| 46 | GO_DYNLINK:x86 ?= "1" | ||
| 47 | GO_DYNLINK:x86-64 ?= "1" | ||
| 48 | GO_DYNLINK:powerpc64 ?= "1" | ||
| 49 | GO_DYNLINK:powerpc64le ?= "1" | ||
| 50 | GO_DYNLINK:class-native ?= "" | ||
| 51 | GO_DYNLINK:class-nativesdk = "" | ||
| 52 | |||
| 53 | # define here because everybody inherits this class | ||
| 54 | # | ||
| 55 | COMPATIBLE_HOST:linux-gnux32 = "null" | ||
| 56 | COMPATIBLE_HOST:linux-muslx32 = "null" | ||
| 57 | COMPATIBLE_HOST:powerpc = "null" | ||
| 58 | COMPATIBLE_HOST:powerpc64 = "null" | ||
| 59 | COMPATIBLE_HOST:mipsarchn32 = "null" | ||
| 60 | COMPATIBLE_HOST:riscv32 = "null" | ||
| 61 | |||
| 62 | ARM_INSTRUCTION_SET:armv4 = "arm" | ||
| 63 | ARM_INSTRUCTION_SET:armv5 = "arm" | ||
| 64 | ARM_INSTRUCTION_SET:armv6 = "arm" | ||
| 65 | |||
| 66 | TUNE_CCARGS:remove = "-march=mips32r2" | ||
| 67 | SECURITY_NOPIE_CFLAGS ??= "" | ||
| 68 | |||
| 69 | # go can't be built with ccache: | ||
| 70 | # gcc: fatal error: no input files | ||
| 71 | CCACHE_DISABLE ?= "1" | ||
| 72 | |||
| 73 | def go_map_arch(a, d): | ||
| 74 | arch = oe.go.map_arch(a) | ||
| 75 | if not arch: | ||
| 76 | raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a) | ||
| 77 | return arch | ||
| 78 | |||
| 79 | def go_map_arm(a, d): | ||
| 80 | if a.startswith("arm"): | ||
| 81 | return d.getVar('BASE_GOARM') | ||
| 82 | return '' | ||
| 83 | |||
| 84 | def go_map_386(a, f, d): | ||
| 85 | import re | ||
| 86 | if re.match('i.86', a): | ||
| 87 | if ('core2' in f) or ('corei7' in f): | ||
| 88 | return 'sse2' | ||
| 89 | else: | ||
| 90 | return 'softfloat' | ||
| 91 | return '' | ||
| 92 | |||
| 93 | def go_map_mips(a, f, d): | ||
| 94 | import re | ||
| 95 | if a == 'mips' or a == 'mipsel': | ||
| 96 | if 'fpu-hard' in f: | ||
| 97 | return 'hardfloat' | ||
| 98 | else: | ||
| 99 | return 'softfloat' | ||
| 100 | return '' | ||
| 101 | |||
| 102 | def go_map_os(o, d): | ||
| 103 | if o.startswith('linux'): | ||
| 104 | return 'linux' | ||
| 105 | return o | ||
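
To make the mappings concrete, these are the values the functions above would yield for a hypothetical armv7a target; treat this as an illustration rather than exhaustive documentation:

    # TARGET_GOOS    = "linux"      (go_map_os: any linux* TARGET_OS -> "linux")
    # TARGET_GOARCH  = "arm"        (go_map_arch via oe.go.map_arch)
    # TARGET_GOARM   = "7"          (go_map_arm via BASE_GOARM:armv7a)
    # TARGET_GOTUPLE = "linux_arm"
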
diff --git a/meta/classes-recipe/gobject-introspection-data.bbclass b/meta/classes-recipe/gobject-introspection-data.bbclass deleted file mode 100644 index aa04c70ca6..0000000000 --- a/meta/classes-recipe/gobject-introspection-data.bbclass +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This variable is set to True if gobject-introspection-data is in | ||
| 8 | # DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise. | ||
| 9 | # | ||
| 10 | # It should be used in recipes to determine whether introspection data should be built, | ||
| 11 | # so that qemu use can be avoided when necessary. | ||
| 12 | GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \ | ||
| 13 | bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}" | ||
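
A hedged sketch of how a recipe might key configuration off GI_DATA_ENABLED; the PACKAGECONFIG name and meson options are hypothetical, chosen only to show the pattern:

    inherit gobject-introspection-data

    PACKAGECONFIG ?= "${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'introspection', '', d)}"
    PACKAGECONFIG[introspection] = "-Dintrospection=true,-Dintrospection=false,gobject-introspection"
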
diff --git a/meta/classes-recipe/gobject-introspection.bbclass b/meta/classes-recipe/gobject-introspection.bbclass deleted file mode 100644 index d0052cd623..0000000000 --- a/meta/classes-recipe/gobject-introspection.bbclass +++ /dev/null | |||
| @@ -1,64 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Inherit this class in recipes to enable building their introspection files | ||
| 8 | |||
| 9 | # python3native is inherited to prevent introspection tools being run with | ||
| 10 | # host's python 3 (they need to be run with native python 3) | ||
| 11 | # | ||
| 12 | # This also sets up autoconf-based recipes to build introspection data (or not), | ||
| 13 | # depending on distro and machine features (see gobject-introspection-data class). | ||
| 14 | inherit python3native gobject-introspection-data | ||
| 15 | |||
| 16 | # meson: default option name to enable/disable introspection. This matches most | ||
| 17 | # projects' configuration. If in doubt, check meson_options.txt in the | ||
| 18 | # project's source tree. | ||
| 19 | GIR_MESON_OPTION ?= 'introspection' | ||
| 20 | GIR_MESON_ENABLE_FLAG ?= 'true' | ||
| 21 | GIR_MESON_DISABLE_FLAG ?= 'false' | ||
| 22 | |||
| 23 | # Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty | ||
| 24 | GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} " | ||
| 25 | GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} " | ||
| 26 | # Auto enable/disable based on GI_DATA_ENABLED | ||
| 27 | EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} " | ||
| 28 | EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}" | ||
| 29 | # When building native recipes, disable introspection, as it is not necessary, | ||
| 30 | # pulls in additional dependencies, and makes build times longer | ||
| 31 | EXTRA_OECONF:prepend:class-native = "--disable-introspection " | ||
| 32 | EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection " | ||
| 33 | EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}" | ||
| 34 | EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}" | ||
| 35 | |||
| 36 | # Generating introspection data depends on a combination of native and target | ||
| 37 | # introspection tools, and qemu to run the target tools. | ||
| 38 | DEPENDS:append:class-target = " ${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'gobject-introspection qemu-native', '', d)}" | ||
| 39 | |||
| 40 | # Even when introspection is disabled, the gobject-introspection package is still needed for m4 macros. | ||
| 41 | DEPENDS:append = " gobject-introspection-native" | ||
| 42 | |||
| 43 | # This is used by introspection tools to find .gir includes | ||
| 44 | export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}" | ||
| 45 | |||
| 46 | do_configure:prepend:class-target () { | ||
| 47 | # introspection.m4 pre-packaged with upstream tarballs does not yet | ||
| 48 | # have our fixes | ||
| 49 | mkdir -p ${S}/m4 | ||
| 50 | cp ${STAGING_DIR_NATIVE}/${datadir}/aclocal/introspection.m4 ${S}/m4 | ||
| 51 | } | ||
| 52 | |||
| 53 | do_compile:prepend() { | ||
| 54 | # This prevents g-ir-scanner from writing cache data to $HOME | ||
| 55 | export GI_SCANNER_DISABLE_CACHE=1 | ||
| 56 | } | ||
| 57 | |||
| 58 | # .typelib files are needed at runtime and so they go to the main package (so | ||
| 59 | # they'll be together with libraries they support). | ||
| 60 | FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib" | ||
| 61 | |||
| 62 | # .gir files go to dev package, as they're needed for developing (but not for | ||
| 63 | # running) things that depend on introspection. | ||
| 64 | FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir" | ||
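
If a project names its meson option differently, the GIR_MESON_* knobs above can be overridden in the recipe; the option name and flag values below are hypothetical:

    inherit gobject-introspection

    # hypothetical: the project uses -Dgir=enabled/disabled
    GIR_MESON_OPTION = "gir"
    GIR_MESON_ENABLE_FLAG = "enabled"
    GIR_MESON_DISABLE_FLAG = "disabled"
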
diff --git a/meta/classes-recipe/grub-efi-cfg.bbclass b/meta/classes-recipe/grub-efi-cfg.bbclass deleted file mode 100644 index 9a5cb99c52..0000000000 --- a/meta/classes-recipe/grub-efi-cfg.bbclass +++ /dev/null | |||
| @@ -1,128 +0,0 @@ | |||
| 1 | # grub-efi.bbclass | ||
| 2 | # Copyright (c) 2011, Intel Corporation. | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | |||
| 6 | # Provide grub-efi specific functions for building bootable images. | ||
| 7 | |||
| 8 | # External variables | ||
| 9 | # ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional) | ||
| 10 | # ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) | ||
| 11 | # ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu | ||
| 12 | # ${LABELS} - a list of targets for the automatic config | ||
| 13 | # ${APPEND} - an override list of append strings for each label | ||
| 14 | # ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional) | ||
| 15 | # ${GRUB_TIMEOUT} - timeout before executing the default label (optional) | ||
| 16 | # ${GRUB_ROOT} - grub's root device. | ||
| 17 | |||
| 18 | GRUB_SERIAL ?= "console=ttyS0,115200" | ||
| 19 | GRUB_CFG_VM = "${S}/grub_vm.cfg" | ||
| 20 | GRUB_CFG_LIVE = "${S}/grub_live.cfg" | ||
| 21 | GRUB_TIMEOUT ?= "10" | ||
| 22 | #FIXME: build this from the machine config | ||
| 23 | GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1" | ||
| 24 | |||
| 25 | GRUB_ROOT ?= "${ROOT}" | ||
| 26 | GRUB_TITLE ?= "" | ||
| 27 | APPEND ?= "" | ||
| 28 | |||
| 29 | # Uses MACHINE specific KERNEL_IMAGETYPE | ||
| 30 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 31 | |||
| 32 | # Need UUID utility code. | ||
| 33 | inherit fs-uuid | ||
| 34 | |||
| 35 | python build_efi_cfg() { | ||
| 36 | import sys | ||
| 37 | |||
| 38 | workdir = d.getVar('WORKDIR') | ||
| 39 | if not workdir: | ||
| 40 | bb.error("WORKDIR not defined, unable to package") | ||
| 41 | return | ||
| 42 | |||
| 43 | gfxserial = d.getVar('GRUB_GFXSERIAL') or "" | ||
| 44 | |||
| 45 | labels = d.getVar('LABELS') | ||
| 46 | if not labels: | ||
| 47 | bb.debug(1, "LABELS not defined, nothing to do") | ||
| 48 | return | ||
| 49 | |||
| 50 | if labels == []: | ||
| 51 | bb.debug(1, "No labels, nothing to do") | ||
| 52 | return | ||
| 53 | |||
| 54 | cfile = d.getVar('GRUB_CFG') | ||
| 55 | if not cfile: | ||
| 56 | bb.fatal('Unable to read GRUB_CFG') | ||
| 57 | |||
| 58 | try: | ||
| 59 | cfgfile = open(cfile, 'w') | ||
| 60 | except OSError: | ||
| 61 | bb.fatal('Unable to open %s' % cfile) | ||
| 62 | |||
| 63 | cfgfile.write('# Automatically created by OE\n') | ||
| 64 | |||
| 65 | opts = d.getVar('GRUB_OPTS') | ||
| 66 | if opts: | ||
| 67 | for opt in opts.split(';'): | ||
| 68 | cfgfile.write('%s\n' % opt) | ||
| 69 | |||
| 70 | cfgfile.write('default=%s\n' % (labels.split()[0])) | ||
| 71 | |||
| 72 | timeout = d.getVar('GRUB_TIMEOUT') | ||
| 73 | if timeout: | ||
| 74 | cfgfile.write('timeout=%s\n' % timeout) | ||
| 75 | else: | ||
| 76 | cfgfile.write('timeout=50\n') | ||
| 77 | |||
| 78 | root = d.getVar('GRUB_ROOT') | ||
| 79 | if not root: | ||
| 80 | bb.fatal('GRUB_ROOT not defined') | ||
| 81 | |||
| 82 | if gfxserial == "1": | ||
| 83 | btypes = [ [ " graphics console", "" ], | ||
| 84 | [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ] | ||
| 85 | else: | ||
| 86 | btypes = [ [ "", "" ] ] | ||
| 87 | |||
| 88 | for label in labels.split(): | ||
| 89 | localdata = d.createCopy() | ||
| 90 | |||
| 91 | overrides = localdata.getVar('OVERRIDES') | ||
| 92 | if not overrides: | ||
| 93 | bb.fatal('OVERRIDES not defined') | ||
| 94 | |||
| 95 | localdata.need_overrides() | ||
| 96 | localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides) | ||
| 97 | |||
| 98 | for btype in btypes: | ||
| 99 | title = localdata.getVar('GRUB_TITLE') | ||
| 100 | if not title or len(title) == 0: | ||
| 101 | title = label | ||
| 102 | |||
| 103 | cfgfile.write('\nmenuentry \'%s%s\'{\n' % (title, btype[0])) | ||
| 104 | lb = label | ||
| 105 | if label == "install": | ||
| 106 | lb = "install-efi" | ||
| 107 | kernel = localdata.getVar('KERNEL_IMAGETYPE') | ||
| 108 | cfgfile.write('linux /%s LABEL=%s' % (kernel, lb)) | ||
| 109 | |||
| 110 | cfgfile.write(' %s' % replace_rootfs_uuid(d, root)) | ||
| 111 | |||
| 112 | append = localdata.getVar('APPEND') | ||
| 113 | initrd = localdata.getVar('INITRD') | ||
| 114 | |||
| 115 | if append: | ||
| 116 | append = replace_rootfs_uuid(d, append) | ||
| 117 | cfgfile.write(' %s' % (append)) | ||
| 118 | |||
| 119 | cfgfile.write(' %s' % btype[1]) | ||
| 120 | cfgfile.write('\n') | ||
| 121 | |||
| 122 | if initrd: | ||
| 123 | cfgfile.write('initrd /initrd') | ||
| 124 | cfgfile.write('\n}\n') | ||
| 125 | |||
| 126 | cfgfile.close() | ||
| 127 | } | ||
| 128 | build_efi_cfg[vardepsexclude] += "OVERRIDES" | ||
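
With the defaults above, LABELS = "boot", GRUB_GFXSERIAL unset and INITRD set, build_efi_cfg would emit a grub.cfg along these lines (the kernel image name and root device are illustrative):

    # Automatically created by OE
    serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
    default=boot
    timeout=10

    menuentry 'boot'{
    linux /bzImage LABEL=boot root=/dev/sda2
    initrd /initrd
    }
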
diff --git a/meta/classes-recipe/grub-efi.bbclass b/meta/classes-recipe/grub-efi.bbclass deleted file mode 100644 index 34bcbc53e6..0000000000 --- a/meta/classes-recipe/grub-efi.bbclass +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit grub-efi-cfg | ||
| 8 | require conf/image-uefi.conf | ||
| 9 | |||
| 10 | # Assure the existence of grub-efi image in deploy dir | ||
| 11 | do_bootimg[depends] += "grub-efi:do_deploy" | ||
| 12 | |||
| 13 | efi_populate() { | ||
| 14 | efi_populate_common "$1" grub-efi | ||
| 15 | |||
| 16 | install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg | ||
| 17 | } | ||
diff --git a/meta/classes-recipe/gsettings.bbclass b/meta/classes-recipe/gsettings.bbclass deleted file mode 100644 index adb027ea0a..0000000000 --- a/meta/classes-recipe/gsettings.bbclass +++ /dev/null | |||
| @@ -1,48 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # A bbclass to handle installed GSettings (glib) schemas, updating the compiled | ||
| 8 | # form on package install and removal. | ||
| 9 | # | ||
| 10 | # The compiled schemas are platform-agnostic, so we can depend on | ||
| 11 | # glib-2.0-native for the native tool and run the postinst script when the | ||
| 12 | # rootfs builds to save a little time on first boot. | ||
| 13 | |||
| 14 | # TODO use a trigger so that this runs once per package operation run | ||
| 15 | |||
| 16 | GSETTINGS_PACKAGE ?= "${PN}" | ||
| 17 | |||
| 18 | python __anonymous() { | ||
| 19 | pkg = d.getVar("GSETTINGS_PACKAGE") | ||
| 20 | if pkg: | ||
| 21 | d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native") | ||
| 22 | d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils") | ||
| 23 | d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas") | ||
| 24 | } | ||
| 25 | |||
| 26 | gsettings_postinstrm () { | ||
| 27 | glib-compile-schemas $D${datadir}/glib-2.0/schemas | ||
| 28 | } | ||
| 29 | |||
| 30 | python populate_packages:append () { | ||
| 31 | pkg = d.getVar('GSETTINGS_PACKAGE') | ||
| 32 | if pkg: | ||
| 33 | bb.note("adding gsettings postinst scripts to %s" % pkg) | ||
| 34 | |||
| 35 | postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst') | ||
| 36 | if not postinst: | ||
| 37 | postinst = '#!/bin/sh\n' | ||
| 38 | postinst += d.getVar('gsettings_postinstrm') | ||
| 39 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 40 | |||
| 41 | bb.note("adding gsettings postrm scripts to %s" % pkg) | ||
| 42 | |||
| 43 | postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm') | ||
| 44 | if not postrm: | ||
| 45 | postrm = '#!/bin/sh\n' | ||
| 46 | postrm += d.getVar('gsettings_postinstrm') | ||
| 47 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 48 | } | ||
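
Typical usage is simply inheriting the class; GSETTINGS_PACKAGE only needs overriding when the schemas are shipped outside ${PN}. The sub-package name below is hypothetical:

    inherit gsettings

    # hypothetical: schemas live in a dedicated sub-package
    GSETTINGS_PACKAGE = "${PN}-schemas"
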
diff --git a/meta/classes-recipe/gtk-doc.bbclass b/meta/classes-recipe/gtk-doc.bbclass deleted file mode 100644 index 9d3911966b..0000000000 --- a/meta/classes-recipe/gtk-doc.bbclass +++ /dev/null | |||
| @@ -1,72 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Helper class to pull in the right gtk-doc dependencies and configure | ||
| 8 | # gtk-doc to enable or disable documentation building (which requires the | ||
| 9 | # use of usermode qemu). | ||
| 10 | |||
| 11 | # This variable is set to True if api-documentation is in | ||
| 12 | # DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise. | ||
| 13 | # | ||
| 14 | # It should be used in recipes to determine whether gtk-doc based documentation should be built, | ||
| 15 | # so that qemu use can be avoided when necessary. | ||
| 16 | GTKDOC_ENABLED:class-native = "False" | ||
| 17 | GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \ | ||
| 18 | bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}" | ||
| 19 | |||
| 20 | # meson: default option name to enable/disable gtk-doc. This matches most | ||
| 21 | # projects' configuration. If in doubt, check meson_options.txt in the | ||
| 22 | # project's source tree. | ||
| 23 | GTKDOC_MESON_OPTION ?= 'docs' | ||
| 24 | GTKDOC_MESON_ENABLE_FLAG ?= 'true' | ||
| 25 | GTKDOC_MESON_DISABLE_FLAG ?= 'false' | ||
| 26 | |||
| 27 | # Auto enable/disable based on GTKDOC_ENABLED | ||
| 28 | EXTRA_OECONF:prepend = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \ | ||
| 29 | '--disable-gtk-doc', d)} " | ||
| 30 | EXTRA_OEMESON:prepend = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} " | ||
| 31 | |||
| 32 | # Even though gtkdoc is disabled on -native, gtk-doc package is still | ||
| 33 | # needed for m4 macros. | ||
| 34 | DEPENDS:append = " gtk-doc-native" | ||
| 35 | |||
| 36 | export STAGING_DIR_HOST | ||
| 37 | |||
| 38 | inherit python3native pkgconfig qemu | ||
| 39 | DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}" | ||
| 40 | |||
| 41 | do_compile:prepend:class-target () { | ||
| 42 | if [ ${GTKDOC_ENABLED} = True ]; then | ||
| 43 | # Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it | ||
| 44 | # can run target helper binaries through that. | ||
| 45 | qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}" | ||
| 46 | cat > ${B}/gtkdoc-qemuwrapper << EOF | ||
| 47 | #!/bin/sh | ||
| 48 | # Use a modules directory which doesn't exist so we don't load random things | ||
| 49 | # which may then get deleted (or their dependencies) and potentially segfault | ||
| 50 | export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy | ||
| 51 | |||
| 52 | GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH | ||
| 53 | GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH | ||
| 54 | |||
| 55 | # meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly | ||
| 56 | unset LD_LIBRARY_PATH | ||
| 57 | |||
| 58 | if [ -d ".libs" ]; then | ||
| 59 | $qemu_binary ".libs/\$@" | ||
| 60 | else | ||
| 61 | $qemu_binary "\$@" | ||
| 62 | fi | ||
| 63 | |||
| 64 | if [ \$? -ne 0 ]; then | ||
| 65 | echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help." | ||
| 66 | echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )" | ||
| 67 | exit 1 | ||
| 68 | fi | ||
| 69 | EOF | ||
| 70 | chmod +x ${B}/gtkdoc-qemuwrapper | ||
| 71 | fi | ||
| 72 | } | ||
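
A short sketch of adapting the class to a project whose meson option is not called 'docs'; the option name here is hypothetical:

    inherit gtk-doc

    # hypothetical: the project defines -Dgtk_doc=true/false in meson_options.txt
    GTKDOC_MESON_OPTION = "gtk_doc"
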
diff --git a/meta/classes-recipe/gtk-icon-cache.bbclass b/meta/classes-recipe/gtk-icon-cache.bbclass deleted file mode 100644 index fad8c4c65f..0000000000 --- a/meta/classes-recipe/gtk-icon-cache.bbclass +++ /dev/null | |||
| @@ -1,95 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | FILES:${PN} += "${datadir}/icons/hicolor" | ||
| 8 | |||
| 9 | GTKIC_VERSION ??= '3' | ||
| 10 | |||
| 11 | GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }" | ||
| 12 | GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0' }" | ||
| 13 | |||
| 14 | # gtk+3/gtk4 require GTK3DISTROFEATURES; depending on them makes all the | ||
| 15 | # recipes that inherit this class require GTK3DISTROFEATURES | ||
| 16 | inherit features_check | ||
| 17 | ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}" | ||
| 18 | |||
| 19 | DEPENDS += "${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \ | ||
| 20 | ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \ | ||
| 21 | ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \ | ||
| 22 | ${GTKPN}-native \ | ||
| 23 | " | ||
| 24 | |||
| 25 | PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native" | ||
| 26 | |||
| 27 | gtk_icon_cache_postinst() { | ||
| 28 | if [ "x$D" != "x" ]; then | ||
| 29 | $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \ | ||
| 30 | mlprefix=${MLPREFIX} \ | ||
| 31 | libdir_native=${libdir_native} | ||
| 32 | else | ||
| 33 | |||
| 34 | # Update the pixbuf loaders in case they haven't been registered yet | ||
| 35 | ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache | ||
| 36 | |||
| 37 | for icondir in /usr/share/icons/* ; do | ||
| 38 | if [ -d $icondir ] ; then | ||
| 39 | ${GTKIC_CMD} -fqt $icondir | ||
| 40 | fi | ||
| 41 | done | ||
| 42 | fi | ||
| 43 | } | ||
| 44 | |||
| 45 | gtk_icon_cache_postrm() { | ||
| 46 | if [ "x$D" != "x" ]; then | ||
| 47 | $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \ | ||
| 48 | mlprefix=${MLPREFIX} \ | ||
| 49 | libdir=${libdir} | ||
| 50 | else | ||
| 51 | for icondir in /usr/share/icons/* ; do | ||
| 52 | if [ -d $icondir ] ; then | ||
| 53 | ${GTKIC_CMD} -qt $icondir | ||
| 54 | fi | ||
| 55 | done | ||
| 56 | fi | ||
| 57 | } | ||
| 58 | |||
| 59 | python populate_packages:append () { | ||
| 60 | packages = d.getVar('PACKAGES').split() | ||
| 61 | pkgdest = d.getVar('PKGDEST') | ||
| 62 | |||
| 63 | for pkg in packages: | ||
| 64 | icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir')) | ||
| 65 | if not os.path.exists(icon_dir): | ||
| 66 | continue | ||
| 67 | |||
| 68 | bb.note("adding hicolor-icon-theme dependency to %s" % pkg) | ||
| 69 | rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme" | ||
| 70 | d.appendVar('RDEPENDS:%s' % pkg, rdepends) | ||
| 71 | |||
| 72 | # gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3/gtk4 | ||
| 73 | bb.note("adding gdk-pixbuf dependency to %s" % pkg) | ||
| 74 | rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf" | ||
| 75 | d.appendVar('RDEPENDS:%s' % pkg, rdepends) | ||
| 76 | |||
| 77 | bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg)) | ||
| 78 | rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN') | ||
| 79 | d.appendVar('RDEPENDS:%s' % pkg, rdepends) | ||
| 80 | |||
| 81 | bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg) | ||
| 82 | |||
| 83 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 84 | if not postinst: | ||
| 85 | postinst = '#!/bin/sh\n' | ||
| 86 | postinst += d.getVar('gtk_icon_cache_postinst') | ||
| 87 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 88 | |||
| 89 | postrm = d.getVar('pkg_postrm:%s' % pkg) | ||
| 90 | if not postrm: | ||
| 91 | postrm = '#!/bin/sh\n' | ||
| 92 | postrm += d.getVar('gtk_icon_cache_postrm') | ||
| 93 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 94 | } | ||
| 95 | |||
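
For an icon theme targeting GTK 4, a recipe would select the gtk4 tooling via GTKIC_VERSION; a minimal sketch:

    inherit gtk-icon-cache

    # selects gtk4-update-icon-cache instead of gtk-update-icon-cache-3.0
    GTKIC_VERSION = "4"
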
diff --git a/meta/classes-recipe/gtk-immodules-cache.bbclass b/meta/classes-recipe/gtk-immodules-cache.bbclass deleted file mode 100644 index 585838c105..0000000000 --- a/meta/classes-recipe/gtk-immodules-cache.bbclass +++ /dev/null | |||
| @@ -1,80 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class will update the inputmethod module cache for virtual keyboards | ||
| 8 | # | ||
| 9 | # Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules | ||
| 10 | |||
| 11 | PACKAGE_WRITE_DEPS += "qemuwrapper-cross" | ||
| 12 | |||
| 13 | GTKIMMODULES_PACKAGES ?= "${PN}" | ||
| 14 | |||
| 15 | gtk_immodule_cache_postinst() { | ||
| 16 | if [ "x$D" != "x" ]; then | ||
| 17 | $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \ | ||
| 18 | mlprefix=${MLPREFIX} \ | ||
| 19 | binprefix=${MLPREFIX} \ | ||
| 20 | libdir=${libdir} \ | ||
| 21 | libexecdir=${libexecdir} \ | ||
| 22 | base_libdir=${base_libdir} \ | ||
| 23 | bindir=${bindir} | ||
| 24 | else | ||
| 25 | if [ ! -z `which gtk-query-immodules-2.0` ]; then | ||
| 26 | gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache | ||
| 27 | fi | ||
| 28 | if [ ! -z `which gtk-query-immodules-3.0` ]; then | ||
| 29 | mkdir -p ${libdir}/gtk-3.0/3.0.0 | ||
| 30 | gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache | ||
| 31 | fi | ||
| 32 | fi | ||
| 33 | } | ||
| 34 | |||
| 35 | gtk_immodule_cache_postrm() { | ||
| 36 | if [ "x$D" != "x" ]; then | ||
| 37 | $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \ | ||
| 38 | mlprefix=${MLPREFIX} \ | ||
| 39 | binprefix=${MLPREFIX} \ | ||
| 40 | libdir=${libdir} \ | ||
| 41 | libexecdir=${libexecdir} \ | ||
| 42 | base_libdir=${base_libdir} \ | ||
| 43 | bindir=${bindir} | ||
| 44 | else | ||
| 45 | if [ ! -z `which gtk-query-immodules-2.0` ]; then | ||
| 46 | gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache | ||
| 47 | fi | ||
| 48 | if [ ! -z `which gtk-query-immodules-3.0` ]; then | ||
| 49 | gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache | ||
| 50 | fi | ||
| 51 | fi | ||
| 52 | } | ||
| 53 | |||
| 54 | python populate_packages:append () { | ||
| 55 | gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split() | ||
| 56 | |||
| 57 | for pkg in gtkimmodules_pkgs: | ||
| 58 | bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg) | ||
| 59 | |||
| 60 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 61 | if not postinst: | ||
| 62 | postinst = '#!/bin/sh\n' | ||
| 63 | postinst += d.getVar('gtk_immodule_cache_postinst') | ||
| 64 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 65 | |||
| 66 | postrm = d.getVar('pkg_postrm:%s' % pkg) | ||
| 67 | if not postrm: | ||
| 68 | postrm = '#!/bin/sh\n' | ||
| 69 | postrm += d.getVar('gtk_immodule_cache_postrm') | ||
| 70 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 71 | } | ||
| 72 | |||
| 73 | python __anonymous() { | ||
| 74 | if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): | ||
| 75 | gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False) | ||
| 76 | if not gtkimmodules_check: | ||
| 77 | bb_filename = d.getVar('FILE', False) | ||
| 78 | bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename) | ||
| 79 | } | ||
| 80 | |||
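
A minimal sketch of a consumer, with a hypothetical package name for the split-out input method module:

    inherit gtk-immodules-cache

    # hypothetical: the input method module is packaged separately
    GTKIMMODULES_PACKAGES = "${PN}-immodule"
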
diff --git a/meta/classes-recipe/image-artifact-names.bbclass b/meta/classes-recipe/image-artifact-names.bbclass deleted file mode 100644 index bc76ff0e16..0000000000 --- a/meta/classes-recipe/image-artifact-names.bbclass +++ /dev/null | |||
| @@ -1,41 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | ################################################################## | ||
| 8 | # Specific image creation and rootfs population info. | ||
| 9 | ################################################################## | ||
| 10 | |||
| 11 | IMAGE_BASENAME ?= "${PN}" | ||
| 12 | IMAGE_VERSION_SUFFIX ?= "-${DATETIME}" | ||
| 13 | IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH" | ||
| 14 | IMAGE_NAME ?= "${IMAGE_LINK_NAME}${IMAGE_VERSION_SUFFIX}" | ||
| 15 | IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}${IMAGE_MACHINE_SUFFIX}${IMAGE_NAME_SUFFIX}" | ||
| 16 | |||
| 17 | # This needs to stay in sync with IMAGE_LINK_NAME, but with INITRAMFS_IMAGE instead of IMAGE_BASENAME | ||
| 18 | # and without ${IMAGE_NAME_SUFFIX} which all initramfs images should set to empty | ||
| 19 | INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}${IMAGE_MACHINE_SUFFIX}', ''][d.getVar('INITRAMFS_IMAGE') == '']}" | ||
| 20 | |||
| 21 | # The default DEPLOY_DIR_IMAGE is ${MACHINE} directory: | ||
| 22 | # meta/conf/bitbake.conf:DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR}/images/${MACHINE}" | ||
| 23 | # so many people find it unnecessary to include this suffix in every image | ||
| 24 | # stored there, but other people often fetch various images for different | ||
| 25 | # MACHINEs to the same downloads directory, and then the suffix is very helpful. | ||
| 26 | # Add a separate variable so projects can decide which scheme works best for them | ||
| 27 | # without understanding the IMAGE_NAME/IMAGE_LINK_NAME structure. | ||
| 28 | IMAGE_MACHINE_SUFFIX ??= "-${MACHINE}" | ||
| 29 | |||
| 30 | # IMAGE_NAME is the base name for everything produced when building images. | ||
| 31 | # The actual image that contains the rootfs has an additional suffix (.rootfs | ||
| 32 | # by default) followed by additional suffices which describe the format (.ext4, | ||
| 33 | # .ext4.xz, etc.). | ||
| 34 | IMAGE_NAME_SUFFIX ??= ".rootfs" | ||
| 35 | |||
| 36 | python () { | ||
| 37 | if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}": | ||
| 38 | import datetime | ||
| 39 | d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S')) | ||
| 40 | d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "") | ||
| 41 | } | ||
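
As an illustration of how these defaults compose (image name, MACHINE and timestamp are hypothetical):

    # IMAGE_BASENAME       = "core-image-minimal"
    # IMAGE_MACHINE_SUFFIX expands to "-qemux86-64"
    # IMAGE_LINK_NAME      = "core-image-minimal-qemux86-64.rootfs"
    # IMAGE_NAME           = "core-image-minimal-qemux86-64.rootfs-20240101120000"
    # and an ext4 rootfs would be deployed as ${IMAGE_NAME}.ext4
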
diff --git a/meta/classes-recipe/image-combined-dbg.bbclass b/meta/classes-recipe/image-combined-dbg.bbclass deleted file mode 100644 index 729313739c..0000000000 --- a/meta/classes-recipe/image-combined-dbg.bbclass +++ /dev/null | |||
| @@ -1,15 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image" | ||
| 8 | |||
| 9 | combine_dbg_image () { | ||
| 10 | if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then | ||
| 11 | # copy target files into -dbg rootfs, so it can be used for | ||
| 12 | # debug purposes directly | ||
| 13 | tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf - | ||
| 14 | fi | ||
| 15 | } | ||
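
To take effect, debug filesystem generation has to be enabled alongside the class; a hedged local.conf sketch (the exact values are common choices, not mandated by this class):

    IMAGE_CLASSES += "image-combined-dbg"
    IMAGE_GEN_DEBUGFS = "1"
    IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
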
diff --git a/meta/classes-recipe/image-container.bbclass b/meta/classes-recipe/image-container.bbclass deleted file mode 100644 index d24b030453..0000000000 --- a/meta/classes-recipe/image-container.bbclass +++ /dev/null | |||
| @@ -1,27 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | ROOTFS_BOOTSTRAP_INSTALL = "" | ||
| 8 | IMAGE_TYPES_MASKED += "container" | ||
| 9 | IMAGE_TYPEDEP:container = "tar.bz2" | ||
| 10 | |||
| 11 | python __anonymous() { | ||
| 12 | if "container" in d.getVar("IMAGE_FSTYPES") and \ | ||
| 13 | d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \ | ||
| 14 | "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"): | ||
| 15 | msg = '"container" is in IMAGE_FSTYPES, but ' \ | ||
| 16 | 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \ | ||
| 17 | 'Unless a particular kernel is needed, using linux-dummy will ' \ | ||
| 18 | 'prevent a kernel from being built, which can reduce ' \ | ||
| 19 | 'build times. If you don\'t want to use "linux-dummy", set ' \ | ||
| 20 | '"IMAGE_CONTAINER_NO_DUMMY" to "1".' | ||
| 21 | |||
| 22 | # Raising skip recipe was Paul's clever idea. It causes the error to | ||
| 23 | # only be shown for the recipes actually requested to build, rather | ||
| 24 | # than bb.fatal which would appear for all recipes inheriting the | ||
| 25 | # class. | ||
| 26 | raise bb.parse.SkipRecipe(msg) | ||
| 27 | } | ||
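
A configuration sketch that satisfies the check above:

    IMAGE_FSTYPES = "container"
    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
    # or, if a real kernel is genuinely needed:
    # IMAGE_CONTAINER_NO_DUMMY = "1"
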
diff --git a/meta/classes-recipe/image-live.bbclass b/meta/classes-recipe/image-live.bbclass deleted file mode 100644 index c3054be630..0000000000 --- a/meta/classes-recipe/image-live.bbclass +++ /dev/null | |||
| @@ -1,267 +0,0 @@ | |||
| 1 | # Copyright (C) 2004, Advanced Micro Devices, Inc. | ||
| 2 | # | ||
| 3 | # SPDX-License-Identifier: MIT | ||
| 4 | |||
| 5 | # Creates a bootable image using syslinux, your kernel and an optional | ||
| 6 | # initrd | ||
| 7 | |||
| 8 | # | ||
| 9 | # End result is two things: | ||
| 10 | # | ||
| 11 | # 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel, | ||
| 12 | # an initrd and a rootfs image. These can be written to harddisks directly and | ||
| 13 | # also booted on USB flash disks (write them there with dd). | ||
| 14 | # | ||
| 15 | # 2. A CD .iso image | ||
| 16 | |||
| 17 | # Boot process is that the initrd will boot and process which label was selected | ||
| 18 | # in syslinux. Actions based on the label are then performed (e.g. installing to | ||
| 19 | # an hdd) | ||
| 20 | |||
| 21 | # External variables (also used by syslinux.bbclass) | ||
| 22 | # ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional) | ||
| 23 | # ${HDDIMG_ID} - FAT image volume-id | ||
| 24 | # ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) | ||
| 25 | |||
| 26 | inherit live-vm-common image-artifact-names | ||
| 27 | |||
| 28 | do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \ | ||
| 29 | mtools-native:do_populate_sysroot \ | ||
| 30 | cdrtools-native:do_populate_sysroot \ | ||
| 31 | virtual/kernel:do_deploy \ | ||
| 32 | ${MLPREFIX}syslinux:do_populate_sysroot \ | ||
| 33 | syslinux-native:do_populate_sysroot \ | ||
| 34 | ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_').split('.')[0]) if d.getVar('ROOTFS') else ''} \ | ||
| 35 | " | ||
| 36 | |||
| 37 | |||
| 38 | LABELS_LIVE ?= "boot install" | ||
| 39 | ROOT_LIVE ?= "root=/dev/ram0" | ||
| 40 | INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs" | ||
| 41 | INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}${IMAGE_MACHINE_SUFFIX}.${@d.getVar('INITRAMFS_FSTYPES').split()[0]}" | ||
| 42 | |||
| 43 | LIVE_ROOTFS_TYPE ?= "ext4" | ||
| 44 | ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}" | ||
| 45 | |||
| 46 | IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}" | ||
| 47 | IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}" | ||
| 48 | IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}" | ||
| 49 | IMAGE_TYPES_MASKED += "live hddimg iso" | ||
| 50 | |||
| 51 | python() { | ||
| 52 | image_b = d.getVar('IMAGE_BASENAME') | ||
| 53 | initrd_i = d.getVar('INITRD_IMAGE_LIVE') | ||
| 54 | if image_b == initrd_i: | ||
| 55 | bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i) | ||
| 56 | bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.') | ||
| 57 | elif initrd_i: | ||
| 58 | d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i) | ||
| 59 | } | ||
| 60 | |||
| 61 | HDDDIR = "${S}/hddimg" | ||
| 62 | ISODIR = "${S}/iso" | ||
| 63 | EFIIMGDIR = "${S}/efi_img" | ||
| 64 | COMPACT_ISODIR = "${S}/iso.z" | ||
| 65 | |||
| 66 | ISOLINUXDIR ?= "/isolinux" | ||
| 67 | ISO_BOOTIMG = "isolinux/isolinux.bin" | ||
| 68 | ISO_BOOTCAT = "isolinux/boot.cat" | ||
| 69 | MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table" | ||
| 70 | |||
| 71 | BOOTIMG_VOLUME_ID ?= "boot" | ||
| 72 | BOOTIMG_EXTRA_SPACE ?= "512" | ||
| 73 | |||
| 74 | populate_live() { | ||
| 75 | populate_kernel $1 | ||
| 76 | if [ -s "${ROOTFS}" ]; then | ||
| 77 | install -m 0644 ${ROOTFS} $1/rootfs.img | ||
| 78 | fi | ||
| 79 | } | ||
| 80 | |||
| 81 | build_iso() { | ||
| 82 | # Only create an ISO if we have an INITRD and the live or iso image type was selected | ||
| 83 | if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then | ||
| 84 | bbnote "ISO image will not be created." | ||
| 85 | return | ||
| 86 | fi | ||
| 87 | # ${INITRD} is a list of multiple filesystem images | ||
| 88 | for fs in ${INITRD} | ||
| 89 | do | ||
| 90 | if [ ! -s "$fs" ]; then | ||
| 91 | bbwarn "ISO image will not be created. $fs is invalid." | ||
| 92 | return | ||
| 93 | fi | ||
| 94 | done | ||
| 95 | |||
| 96 | populate_live ${ISODIR} | ||
| 97 | |||
| 98 | if [ "${PCBIOS}" = "1" ]; then | ||
| 99 | syslinux_iso_populate ${ISODIR} | ||
| 100 | fi | ||
| 101 | if [ "${EFI}" = "1" ]; then | ||
| 102 | efi_iso_populate ${ISODIR} | ||
| 103 | build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img | ||
| 104 | fi | ||
| 105 | |||
| 106 | # EFI only | ||
| 107 | if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then | ||
| 108 | # Work around a bug in isohybrid where it requires isolinux.bin | ||
| 109 | # in the boot catalog, even though it is not used | ||
| 110 | mkdir -p ${ISODIR}/${ISOLINUXDIR} | ||
| 111 | install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR} | ||
| 112 | fi | ||
| 113 | |||
| 114 | # We used to have support for zisofs; this is a relic of that | ||
| 115 | mkisofs_compress_opts="-r" | ||
| 116 | |||
| 117 | # Check the size of ${ISODIR}/rootfs.img and use 'mkisofs -iso-level 3' | ||
| 118 | # when it exceeds 3.8GB; the specification limit is 4GB - 1 byte, and we | ||
| 119 | # need to leave some space for other files. | ||
| 120 | mkisofs_iso_level="" | ||
| 121 | |||
| 122 | if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then | ||
| 123 | rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img` | ||
| 124 | # 4080218931 = 3.8 * 1024 * 1024 * 1024 | ||
| 125 | if [ $rootfs_img_size -gt 4080218931 ]; then | ||
| 126 | bbnote "${ISODIR}/rootfs.img exceeds 3.8GB, using '-iso-level 3' for mkisofs" | ||
| 127 | mkisofs_iso_level="-iso-level 3" | ||
| 128 | fi | ||
| 129 | fi | ||
| 130 | |||
| 131 | if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then | ||
| 132 | # PCBIOS only media | ||
| 133 | mkisofs -V ${BOOTIMG_VOLUME_ID} \ | ||
| 134 | -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \ | ||
| 135 | -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \ | ||
| 136 | $mkisofs_compress_opts \ | ||
| 137 | ${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR} | ||
| 138 | else | ||
| 139 | # EFI only OR EFI+PCBIOS | ||
| 140 | mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \ | ||
| 141 | -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \ | ||
| 142 | -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \ | ||
| 143 | $mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \ | ||
| 144 | -eltorito-alt-boot -eltorito-platform efi \ | ||
| 145 | -b efi.img -no-emul-boot \ | ||
| 146 | ${ISODIR} | ||
| 147 | isohybrid_args="-u" | ||
| 148 | fi | ||
| 149 | |||
| 150 | # EFI only does not need isohybrid | ||
| 151 | if [ "${PCBIOS}" = "1" ] || [ "${EFI}" != "1" ]; then | ||
| 152 | isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso | ||
| 153 | fi | ||
| 154 | } | ||
| 155 | |||
| 156 | build_fat_img() { | ||
| 157 | FATSOURCEDIR=$1 | ||
| 158 | FATIMG=$2 | ||
| 159 | |||
| 160 | # Calculate the size required for the final image including the | ||
| 161 | # data and filesystem overhead. | ||
| 162 | # Sectors: 512 bytes | ||
| 163 | # Blocks: 1024 bytes | ||
| 164 | |||
| 165 | # Determine the sector count just for the data | ||
| 166 | SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2) | ||
| 167 | |||
| 168 | # Account for the filesystem overhead. This includes directory | ||
| 169 | # entries in the clusters as well as the FAT itself. | ||
| 170 | # Assumptions: | ||
| 171 | # FAT32 (12 or 16 may be selected by mkdosfs, but the extra | ||
| 172 | # padding will be minimal on those smaller images and not | ||
| 173 | # worth the logic here to calculate the smaller FAT sizes) | ||
| 174 | # < 16 entries per directory | ||
| 175 | # 8.3 filenames only | ||
| 176 | |||
| 177 | # 32 bytes per dir entry | ||
| 178 | DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32) | ||
| 179 | # 32 bytes for every end-of-directory dir entry | ||
| 180 | DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32)) | ||
| 181 | # 4 bytes per FAT entry per sector of data | ||
| 182 | FAT_BYTES=$(expr $SECTORS \* 4) | ||
| 183 | # 4 bytes per FAT entry per end-of-cluster list | ||
| 184 | FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4)) | ||
| 185 | |||
| 186 | # Use a ceiling function to determine FS overhead in sectors | ||
| 187 | DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512) | ||
| 188 | # There are two FATs on the image | ||
| 189 | FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2) | ||
| 190 | SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS)) | ||
| 191 | |||
| 192 | # Determine the final size in blocks accounting for some padding | ||
| 193 | BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE}) | ||
| 194 | |||
| 195 | # mkdosfs will sometimes use FAT16 when it is not appropriate, | ||
| 196 | # resulting in a boot failure from SYSLINUX. Use FAT32 for | ||
| 197 | # images larger than 512MB, otherwise let mkdosfs decide. | ||
| 198 | if [ $(expr $BLOCKS / 1024) -gt 512 ]; then | ||
| 199 | FATSIZE="-F 32" | ||
| 200 | fi | ||
| 201 | |||
| 202 | # mkdosfs will fail if ${FATIMG} exists. Since we are creating a | ||
| 203 | # new image, it is safe to delete any previous image. | ||
| 204 | if [ -e ${FATIMG} ]; then | ||
| 205 | rm ${FATIMG} | ||
| 206 | fi | ||
| 207 | |||
| 208 | if [ -z "${HDDIMG_ID}" ]; then | ||
| 209 | mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \ | ||
| 210 | ${BLOCKS} | ||
| 211 | else | ||
| 212 | mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \ | ||
| 213 | ${BLOCKS} -i ${HDDIMG_ID} | ||
| 214 | fi | ||
| 215 | |||
| 216 | # Copy FATSOURCEDIR recursively into the image file directly | ||
| 217 | mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/ | ||
| 218 | } | ||
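To make the sizing arithmetic in build_fat_img concrete, here is a hypothetical worked example (all figures invented, not taken from a real build): a 102400 KiB payload containing about 1050 files and directories, 50 of them directories, with BOOTIMG_EXTRA_SPACE=512:

```sh
SECTORS=$(expr 102400 \* 2)                                  # 204800 data sectors
DIR_BYTES=$(expr 1050 \* 32 + 50 \* 32)                      # dir entries + end-of-dir entries = 35200
FAT_BYTES=$(expr 204800 \* 4 + 50 \* 4)                      # FAT entries + end-of-cluster entries = 819400
DIR_SECTORS=$(expr \( $DIR_BYTES + 511 \) / 512)             # ceiling -> 69
FAT_SECTORS=$(expr \( \( $FAT_BYTES + 511 \) / 512 \) \* 2)  # two FAT copies -> 3202
SECTORS=$(expr $SECTORS + $DIR_SECTORS + $FAT_SECTORS)       # 208071 sectors
BLOCKS=$(expr $SECTORS / 2 + 512)                            # 104547 KiB, i.e. ~102 MiB image
```

The filesystem overhead (directory entries plus two FATs) adds only a few percent here, which is why the coarse FAT32 assumption is acceptable for smaller images.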
| 219 | |||
| 220 | build_hddimg() { | ||
| 221 | # Create an HDD image | ||
| 222 | if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then | ||
| 223 | populate_live ${HDDDIR} | ||
| 224 | |||
| 225 | if [ "${PCBIOS}" = "1" ]; then | ||
| 226 | syslinux_hddimg_populate ${HDDDIR} | ||
| 227 | fi | ||
| 228 | if [ "${EFI}" = "1" ]; then | ||
| 229 | efi_hddimg_populate ${HDDDIR} | ||
| 230 | fi | ||
| 231 | |||
| 232 | 		# Check the size of ${HDDDIR}/rootfs.img and error out if it | ||
| 233 | 		# exceeds 4GB, the maximum size of a single file on a FAT filesystem. | ||
| 234 | if [ -f ${HDDDIR}/rootfs.img ]; then | ||
| 235 | rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img` | ||
| 236 | max_size=`expr 4 \* 1024 \* 1024 \* 1024` | ||
| 237 | if [ $rootfs_img_size -ge $max_size ]; then | ||
| 238 | 				bberror "${HDDDIR}/rootfs.img rootfs size is greater than or equal to 4GB," | ||
| 239 | bberror "and this doesn't work on a FAT filesystem. You can either:" | ||
| 240 | bberror "1) Reduce the size of rootfs.img, or," | ||
| 241 | 				bbfatal "2) Use wic, vmdk, vhd, vhdx or vdi instead of hddimg\n" | ||
| 242 | fi | ||
| 243 | fi | ||
| 244 | |||
| 245 | build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg | ||
| 246 | |||
| 247 | if [ "${PCBIOS}" = "1" ]; then | ||
| 248 | syslinux_hddimg_install | ||
| 249 | fi | ||
| 250 | |||
| 251 | chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg | ||
| 252 | fi | ||
| 253 | } | ||
| 254 | |||
| 255 | python do_bootimg() { | ||
| 256 | set_live_vm_vars(d, 'LIVE') | ||
| 257 | if d.getVar("PCBIOS") == "1": | ||
| 258 | bb.build.exec_func('build_syslinux_cfg', d) | ||
| 259 | if d.getVar("EFI") == "1": | ||
| 260 | bb.build.exec_func('build_efi_cfg', d) | ||
| 261 | bb.build.exec_func('build_hddimg', d) | ||
| 262 | bb.build.exec_func('build_iso', d) | ||
| 263 | bb.build.exec_func('create_symlinks', d) | ||
| 264 | } | ||
| 265 | do_bootimg[subimages] = "hddimg iso" | ||
| 266 | |||
| 267 | addtask bootimg before do_image_complete after do_rootfs | ||
diff --git a/meta/classes-recipe/image-postinst-intercepts.bbclass b/meta/classes-recipe/image-postinst-intercepts.bbclass deleted file mode 100644 index fc15926384..0000000000 --- a/meta/classes-recipe/image-postinst-intercepts.bbclass +++ /dev/null | |||
| @@ -1,29 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Gather existing and candidate postinst intercepts from BBPATH | ||
| 8 | POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts" | ||
| 9 | POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}" | ||
| 10 | |||
| 11 | python find_intercepts() { | ||
| 12 | intercepts = {} | ||
| 13 | search_paths = [] | ||
| 14 | paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':') | ||
| 15 | overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + [''] | ||
| 16 | search_paths = [os.path.join(p, op) for p in paths for op in overrides] | ||
| 17 | searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True) | ||
| 18 | files, chksums = [], [] | ||
| 19 | for pathname, candidates in searched: | ||
| 20 | if os.path.isfile(pathname): | ||
| 21 | files.append(pathname) | ||
| 22 | chksums.append('%s:True' % pathname) | ||
| 23 | chksums.extend('%s:False' % c for c in candidates[:-1]) | ||
| 24 | |||
| 25 | d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums)) | ||
| 26 | d.setVar('POSTINST_INTERCEPTS', ' '.join(files)) | ||
| 27 | } | ||
| 28 | find_intercepts[eventmask] += "bb.event.RecipePreFinalise" | ||
| 29 | addhandler find_intercepts | ||
diff --git a/meta/classes-recipe/image.bbclass b/meta/classes-recipe/image.bbclass deleted file mode 100644 index ccad308b93..0000000000 --- a/meta/classes-recipe/image.bbclass +++ /dev/null | |||
| @@ -1,705 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | IMAGE_CLASSES ??= "" | ||
| 8 | |||
| 9 | # rootfs bootstrap install | ||
| 10 | # warning - image-container resets this | ||
| 11 | ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts" | ||
| 12 | |||
| 13 | # Handle inherits of any of the image classes we need | ||
| 14 | IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}" | ||
| 15 | # Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base | ||
| 16 | # in the non-Linux SDK_OS case, such as mingw32 | ||
| 17 | inherit populate_sdk_base | ||
| 18 | IMGCLASSES += "${@['', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}" | ||
| 19 | IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}" | ||
| 20 | IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}" | ||
| 21 | IMGCLASSES += "image_types_wic" | ||
| 22 | IMGCLASSES += "rootfs-postcommands" | ||
| 23 | IMGCLASSES += "image-postinst-intercepts" | ||
| 24 | IMGCLASSES += "overlayfs-etc" | ||
| 25 | inherit_defer ${IMGCLASSES} | ||
| 26 | |||
| 27 | TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}" | ||
| 28 | TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}" | ||
| 29 | POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks" | ||
| 30 | |||
| 31 | LICENSE ?= "MIT" | ||
| 32 | PACKAGES = "" | ||
| 33 | DEPENDS += "depmodwrapper-cross cross-localedef-native" | ||
| 34 | RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}" | ||
| 35 | RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}" | ||
| 36 | PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:" | ||
| 37 | |||
| 38 | INHIBIT_DEFAULT_DEPS = "1" | ||
| 39 | |||
| 40 | # IMAGE_FEATURES may contain any available package group | ||
| 41 | IMAGE_FEATURES ?= "" | ||
| 42 | IMAGE_FEATURES[type] = "list" | ||
| 43 | IMAGE_FEATURES[validitems] += "read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login serial-autologin-root post-install-logging overlayfs-etc" | ||
| 44 | |||
| 45 | # Generate companion debugfs? | ||
| 46 | IMAGE_GEN_DEBUGFS ?= "0" | ||
| 47 | |||
| 48 | # These packages will be installed as additional packages into the debug rootfs | ||
| 49 | IMAGE_INSTALL_DEBUGFS ?= "" | ||
| 50 | |||
| 51 | # These packages will be removed from a read-only rootfs after all other | ||
| 52 | # packages have been installed | ||
| 53 | ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}" | ||
| 54 | |||
| 55 | # packages to install from features | ||
| 56 | FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}" | ||
| 57 | FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}" | ||
| 58 | FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}" | ||
| 59 | FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}" | ||
| 60 | |||
| 61 | # Define some very basic feature package groups | ||
| 62 | FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}" | ||
| 63 | SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}" | ||
| 64 | FEATURE_PACKAGES_splash = "${SPLASH}" | ||
| 65 | |||
| 66 | IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}' | ||
| 67 | |||
| 68 | def check_image_features(d): | ||
| 69 | valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split() | ||
| 70 | valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys() | ||
| 71 | for var in d: | ||
| 72 | if var.startswith("FEATURE_PACKAGES_"): | ||
| 73 | valid_features.append(var[17:]) | ||
| 74 | valid_features.sort() | ||
| 75 | |||
| 76 | features = set(oe.data.typed_value('IMAGE_FEATURES', d)) | ||
| 77 | for feature in features: | ||
| 78 | if feature not in valid_features: | ||
| 79 | if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d): | ||
| 80 | raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features))) | ||
| 81 | else: | ||
| 82 | raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features))) | ||
| 83 | |||
| 84 | IMAGE_INSTALL ?= "" | ||
| 85 | IMAGE_INSTALL[type] = "list" | ||
| 86 | export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}" | ||
| 87 | PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}" | ||
| 88 | |||
| 89 | IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete" | ||
| 90 | |||
| 91 | IMGMANIFESTDIR = "${WORKDIR}/image-task-manifest" | ||
| 92 | |||
| 93 | IMAGE_OUTPUT_MANIFEST_DIR = "${WORKDIR}/deploy-image-output-manifest" | ||
| 94 | IMAGE_OUTPUT_MANIFEST = "${IMAGE_OUTPUT_MANIFEST_DIR}/manifest.json" | ||
| 95 | |||
| 96 | # Images are generally built explicitly and do not need to be part of world. | ||
| 97 | EXCLUDE_FROM_WORLD = "1" | ||
| 98 | |||
| 99 | USE_DEVFS ?= "1" | ||
| 100 | USE_DEPMOD ?= "1" | ||
| 101 | |||
| 102 | PID = "${@os.getpid()}" | ||
| 103 | |||
| 104 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 105 | SSTATE_ARCHS_TUNEPKG = "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}" | ||
| 106 | |||
| 107 | LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot" | ||
| 108 | LDCONFIGDEPEND:libc-musl = "" | ||
| 109 | |||
| 110 | # This is needed to have depmod data in PKGDATA_DIR, | ||
| 111 | # but if you're building a small initramfs image, | ||
| 112 | # e.g. to include it in your kernel, you probably | ||
| 113 | # don't want this dependency, as it causes a dependency loop | ||
| 114 | KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata" | ||
| 115 | |||
| 116 | do_rootfs[depends] += " \ | ||
| 117 | makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \ | ||
| 118 | virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \ | ||
| 119 | ${KERNELDEPMODDEPEND} \ | ||
| 120 | " | ||
| 121 | do_rootfs[recrdeptask] += "do_packagedata" | ||
| 122 | |||
| 123 | def rootfs_command_variables(d): | ||
| 124 | return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND', | ||
| 125 | 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS'] | ||
| 126 | |||
| 127 | python () { | ||
| 128 | variables = rootfs_command_variables(d) | ||
| 129 | for var in variables: | ||
| 130 | d.setVarFlag(var, 'vardeps', d.getVar(var)) | ||
| 131 | } | ||
| 132 | |||
| 133 | def rootfs_variables(d): | ||
| 134 | from oe.rootfs import variable_depends | ||
| 135 | variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE', | ||
| 136 | 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE', | ||
| 137 | 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS', | ||
| 138 | 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS', | ||
| 139 | 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS'] | ||
| 140 | variables.extend(rootfs_command_variables(d)) | ||
| 141 | variables.extend(variable_depends(d)) | ||
| 142 | return " ".join(variables) | ||
| 143 | |||
| 144 | do_rootfs[vardeps] += "${@rootfs_variables(d)}" | ||
| 145 | |||
| 146 | # This is needed to have the kernel image in DEPLOY_DIR. | ||
| 147 | # This follows many common use cases and user expectations. | ||
| 148 | # But if you are building an image which doesn't need the kernel image at all, | ||
| 149 | # you can unset this variable manually. | ||
| 150 | KERNEL_DEPLOY_DEPEND ?= "virtual/kernel:do_deploy" | ||
| 151 | do_build[depends] += "${KERNEL_DEPLOY_DEPEND}" | ||
| 152 | |||
| 153 | |||
| 154 | python () { | ||
| 155 | def extraimage_getdepends(task): | ||
| 156 | deps = "" | ||
| 157 | for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split(): | ||
| 158 | if ":" in dep: | ||
| 159 | deps += " %s " % (dep) | ||
| 160 | else: | ||
| 161 | deps += " %s:%s" % (dep, task) | ||
| 162 | return deps | ||
| 163 | |||
| 164 | d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot')) | ||
| 165 | |||
| 166 | deps = " " + imagetypes_getdepends(d) | ||
| 167 | d.appendVarFlag('do_rootfs', 'depends', deps) | ||
| 168 | |||
| 169 |     # Process IMAGE_FEATURES; we must do this before runtime_mapping_rename | ||
| 170 |     # Check for replaced image features | ||
| 171 | features = set(oe.data.typed_value('IMAGE_FEATURES', d)) | ||
| 172 | remain_features = features.copy() | ||
| 173 | for feature in features: | ||
| 174 | replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split()) | ||
| 175 | remain_features -= replaces | ||
| 176 | |||
| 177 |     # Check for conflicting image features | ||
| 178 | for feature in remain_features: | ||
| 179 | conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split()) | ||
| 180 | temp = conflicts & remain_features | ||
| 181 | if temp: | ||
| 182 | bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp)))) | ||
| 183 | |||
| 184 | d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features)))) | ||
| 185 | |||
| 186 | check_image_features(d) | ||
| 187 | } | ||
| 188 | |||
| 189 | IMAGE_POSTPROCESS_COMMAND ?= "" | ||
| 190 | |||
| 191 | IMAGE_LINGUAS ??= "" | ||
| 192 | |||
| 193 | LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}" | ||
| 194 | |||
| 195 | # By default, create a locale archive | ||
| 196 | IMAGE_LOCALES_ARCHIVE ?= '1' | ||
| 197 | |||
| 198 | # Prefer image, but use the fallback files for lookups if the image ones | ||
| 199 | # aren't yet available. | ||
| 200 | PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}" | ||
| 201 | |||
| 202 | PACKAGE_EXCLUDE ??= "" | ||
| 203 | PACKAGE_EXCLUDE[type] = "list" | ||
| 204 | |||
| 205 | fakeroot python do_rootfs () { | ||
| 206 | from oe.rootfs import create_rootfs | ||
| 207 | from oe.manifest import create_manifest | ||
| 208 | import logging | ||
| 209 | import oe.packagedata | ||
| 210 | |||
| 211 | logger = d.getVar('BB_TASK_LOGGER', False) | ||
| 212 | if logger: | ||
| 213 | logcatcher = bb.utils.LogCatcher() | ||
| 214 | logger.addHandler(logcatcher) | ||
| 215 | else: | ||
| 216 | logcatcher = None | ||
| 217 | |||
| 218 | # NOTE: if you add, remove or significantly refactor the stages of this | ||
| 219 | # process then you should recalculate the weightings here. This is quite | ||
| 220 | # easy to do - just change the MultiStageProgressReporter line temporarily | ||
| 221 | # to pass debug=True as the last parameter and you'll get a printout of | ||
| 222 | # the weightings as well as a map to the lines where next_stage() was | ||
| 223 | # called. Of course this isn't critical, but it helps to keep the progress | ||
| 224 | # reporting accurate. | ||
| 225 | stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1] | ||
| 226 | progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights) | ||
| 227 | progress_reporter.next_stage() | ||
| 228 | |||
| 229 | # Handle package exclusions | ||
| 230 | excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split() | ||
| 231 | inst_pkgs = d.getVar("PACKAGE_INSTALL").split() | ||
| 232 | inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split() | ||
| 233 | |||
| 234 | d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs)) | ||
| 235 | d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs)) | ||
| 236 | |||
| 237 | for pkg in excl_pkgs: | ||
| 238 | if pkg in inst_pkgs: | ||
| 239 | bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs)) | ||
| 240 | inst_pkgs.remove(pkg) | ||
| 241 | |||
| 242 | if pkg in inst_attempt_pkgs: | ||
| 243 |             bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs)) | ||
| 244 | inst_attempt_pkgs.remove(pkg) | ||
| 245 | |||
| 246 | d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs)) | ||
| 247 | d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs)) | ||
| 248 | |||
| 249 | # Ensure we handle package name remapping | ||
| 250 |     # We have to delay the runtime_mapping_rename until just before rootfs runs, | ||
| 251 |     # otherwise the multilib renaming could step in and squash any fixups that | ||
| 252 | # may have occurred. | ||
| 253 | pn = d.getVar('PN') | ||
| 254 | oe.packagedata.runtime_mapping_rename("PACKAGE_INSTALL", pn, d) | ||
| 255 | oe.packagedata.runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d) | ||
| 256 | oe.packagedata.runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d) | ||
| 257 | |||
| 258 | # Generate the initial manifest | ||
| 259 | create_manifest(d) | ||
| 260 | |||
| 261 | progress_reporter.next_stage() | ||
| 262 | |||
| 263 | # generate rootfs | ||
| 264 | d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1') | ||
| 265 | create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher) | ||
| 266 | |||
| 267 | progress_reporter.finish() | ||
| 268 | } | ||
| 269 | do_rootfs[dirs] = "${TOPDIR}" | ||
| 270 | do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}" | ||
| 271 | do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}" | ||
| 272 | addtask rootfs after do_prepare_recipe_sysroot | ||
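The NOTE comment inside do_rootfs above describes how to recalibrate the stage weightings. A minimal sketch of the temporary change (the stage_weights values stay as in the task; only the reporter construction changes, using the debug parameter the comment refers to):

```python
# Temporary change inside do_rootfs, used only while recalibrating weights:
progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights,
                                                           debug=True)
# With debug enabled the reporter prints the measured weightings and a map
# of the lines where next_stage() was called, so the hardcoded list can be
# updated after refactoring.
```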
| 273 | |||
| 274 | fakeroot python do_image () { | ||
| 275 | from oe.utils import execute_pre_post_process | ||
| 276 | |||
| 277 | d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1') | ||
| 278 | pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND") | ||
| 279 | |||
| 280 | execute_pre_post_process(d, pre_process_cmds) | ||
| 281 | } | ||
| 282 | do_image[dirs] = "${TOPDIR}" | ||
| 283 | do_image[cleandirs] += "${IMGMANIFESTDIR}" | ||
| 284 | addtask do_image after do_rootfs | ||
| 285 | |||
| 286 | fakeroot python do_image_complete () { | ||
| 287 | from oe.utils import execute_pre_post_process | ||
| 288 | from pathlib import Path | ||
| 289 | import json | ||
| 290 | |||
| 291 | post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND") | ||
| 292 | |||
| 293 | execute_pre_post_process(d, post_process_cmds) | ||
| 294 | |||
| 295 | image_manifest_dir = Path(d.getVar('IMGMANIFESTDIR')) | ||
| 296 | |||
| 297 | data = [] | ||
| 298 | |||
| 299 | for manifest_path in image_manifest_dir.glob("*.json"): | ||
| 300 | with manifest_path.open("r") as f: | ||
| 301 | data.extend(json.load(f)) | ||
| 302 | |||
| 303 | with open(d.getVar("IMAGE_OUTPUT_MANIFEST"), "w") as f: | ||
| 304 | json.dump(data, f) | ||
| 305 | } | ||
| 306 | do_image_complete[dirs] = "${TOPDIR}" | ||
| 307 | SSTATETASKS += "do_image_complete" | ||
| 308 | SSTATE_SKIP_CREATION:task-image-complete = '1' | ||
| 309 | do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}" | ||
| 310 | do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" | ||
| 311 | do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}" | ||
| 312 | do_image_complete[sstate-plaindirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}" | ||
| 313 | do_image_complete[dirs] += "${IMAGE_OUTPUT_MANIFEST_DIR}" | ||
| 314 | addtask do_image_complete after do_image before do_build | ||
| 315 | python do_image_complete_setscene () { | ||
| 316 | sstate_setscene(d) | ||
| 317 | } | ||
| 318 | addtask do_image_complete_setscene | ||
| 319 | |||
| 320 | # Add image-level QA/sanity checks to IMAGE_QA_COMMANDS | ||
| 321 | # | ||
| 322 | # IMAGE_QA_COMMANDS += " \ | ||
| 323 | # image_check_everything_ok \ | ||
| 324 | # " | ||
| 325 | # | ||
| 326 | # This task runs all functions in IMAGE_QA_COMMANDS after the rootfs | ||
| 327 | # construction has completed in order to validate the resulting image. | ||
| 328 | # | ||
| 329 | # The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs | ||
| 330 | # directory, which if QA passes will be the basis for the images. | ||
| 331 | # | ||
| 332 | # The functions are expected to call oe.qa.handle_error() to report any | ||
| 333 | # problems. | ||
| 334 | fakeroot python do_image_qa () { | ||
| 335 | qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split() | ||
| 336 | |||
| 337 | for cmd in qa_cmds: | ||
| 338 | bb.build.exec_func(cmd, d) | ||
| 339 | |||
| 340 | oe.qa.exit_if_errors(d) | ||
| 341 | } | ||
| 342 | addtask do_image_qa after do_rootfs before do_image | ||
| 343 | |||
| 344 | SSTATETASKS += "do_image_qa" | ||
| 345 | SSTATE_SKIP_CREATION:task-image-qa = '1' | ||
| 346 | do_image_qa[sstate-inputdirs] = "" | ||
| 347 | do_image_qa[sstate-outputdirs] = "" | ||
| 348 | python do_image_qa_setscene () { | ||
| 349 | sstate_setscene(d) | ||
| 350 | } | ||
| 351 | addtask do_image_qa_setscene | ||
| 352 | |||
| 353 | def setup_debugfs_variables(d): | ||
| 354 | d.appendVar('IMAGE_ROOTFS', '-dbg') | ||
| 355 | if d.getVar('IMAGE_LINK_NAME'): | ||
| 356 | d.appendVar('IMAGE_LINK_NAME', '-dbg') | ||
| 357 | d.appendVar('IMAGE_NAME','-dbg') | ||
| 358 | d.setVar('IMAGE_BUILDING_DEBUGFS', 'true') | ||
| 359 | debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS') | ||
| 360 | if debugfs_image_fstypes: | ||
| 361 | d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes) | ||
| 362 | |||
| 363 | python setup_debugfs () { | ||
| 364 | setup_debugfs_variables(d) | ||
| 365 | } | ||
| 366 | |||
| 367 | python () { | ||
| 368 | vardeps = set() | ||
| 369 | # We allow CONVERSIONTYPES to have duplicates. That avoids breaking | ||
| 370 | # derived distros when OE-core or some other layer independently adds | ||
| 371 | # the same type. There is still only one command for each type, but | ||
| 372 | # presumably the commands will do the same when the type is the same, | ||
| 373 | # even when added in different places. | ||
| 374 | # | ||
| 375 | # Without de-duplication, gen_conversion_cmds() below | ||
| 376 | # would create the same compression command multiple times. | ||
| 377 | ctypes = set(d.getVar('CONVERSIONTYPES').split()) | ||
| 378 | old_overrides = d.getVar('OVERRIDES', False) | ||
| 379 | |||
| 380 | def _image_base_type(type): | ||
| 381 | basetype = type | ||
| 382 | for ctype in ctypes: | ||
| 383 | if type.endswith("." + ctype): | ||
| 384 | basetype = type[:-len("." + ctype)] | ||
| 385 | break | ||
| 386 | |||
| 387 | if basetype != type: | ||
| 388 | # New base type itself might be generated by a conversion command. | ||
| 389 | basetype = _image_base_type(basetype) | ||
| 390 | |||
| 391 | return basetype | ||
| 392 | |||
| 393 | basetypes = {} | ||
| 394 | alltypes = d.getVar('IMAGE_FSTYPES').split() | ||
| 395 | typedeps = {} | ||
| 396 | |||
| 397 | if d.getVar('IMAGE_GEN_DEBUGFS') == "1": | ||
| 398 | debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split() | ||
| 399 | for t in debugfs_fstypes: | ||
| 400 | alltypes.append("debugfs_" + t) | ||
| 401 | |||
| 402 | def _add_type(t): | ||
| 403 | baset = _image_base_type(t) | ||
| 404 | input_t = t | ||
| 405 | if baset not in basetypes: | ||
| 406 | basetypes[baset]= [] | ||
| 407 | if t not in basetypes[baset]: | ||
| 408 | basetypes[baset].append(t) | ||
| 409 | debug = "" | ||
| 410 | if t.startswith("debugfs_"): | ||
| 411 | t = t[8:] | ||
| 412 | debug = "debugfs_" | ||
| 413 | deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split() | ||
| 414 | vardeps.add('IMAGE_TYPEDEP:' + t) | ||
| 415 | if baset not in typedeps: | ||
| 416 | typedeps[baset] = set() | ||
| 417 | deps = [debug + dep for dep in deps] | ||
| 418 | for dep in deps: | ||
| 419 | if dep not in alltypes: | ||
| 420 | alltypes.append(dep) | ||
| 421 | _add_type(dep) | ||
| 422 | basedep = _image_base_type(dep) | ||
| 423 | typedeps[baset].add(basedep) | ||
| 424 | |||
| 425 | if baset != input_t: | ||
| 426 | _add_type(baset) | ||
| 427 | |||
| 428 | for t in alltypes[:]: | ||
| 429 | _add_type(t) | ||
| 430 | |||
| 431 | d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps)) | ||
| 432 | |||
| 433 | maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split() | ||
| 434 | maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")] | ||
| 435 | |||
| 436 | for t in basetypes: | ||
| 437 | vardeps = set() | ||
| 438 | cmds = [] | ||
| 439 | subimages = [] | ||
| 440 | realt = t | ||
| 441 | |||
| 442 | if t in maskedtypes: | ||
| 443 | continue | ||
| 444 | |||
| 445 | localdata = bb.data.createCopy(d) | ||
| 446 | debug = "" | ||
| 447 | if t.startswith("debugfs_"): | ||
| 448 | setup_debugfs_variables(localdata) | ||
| 449 | debug = "setup_debugfs " | ||
| 450 | realt = t[8:] | ||
| 451 | localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides)) | ||
| 452 | localdata.setVar('type', realt) | ||
| 453 | # Delete DATETIME so we don't expand any references to it now | ||
| 454 | # This means the task's hash can be stable rather than having hardcoded | ||
| 455 | # date/time values. It will get expanded at execution time. | ||
| 456 |         # Similarly TMPDIR since otherwise we see QA stamp comparison problems | ||
| 457 | # Expand PV else it can trigger get_srcrev which can fail due to these variables being unset | ||
| 458 | localdata.setVar('PV', d.getVar('PV')) | ||
| 459 | localdata.delVar('DATETIME') | ||
| 460 | localdata.delVar('DATE') | ||
| 461 | localdata.delVar('TMPDIR') | ||
| 462 | localdata.delVar('IMAGE_VERSION_SUFFIX') | ||
| 463 | vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude') or '').split() | ||
| 464 | for dep in vardepsexclude: | ||
| 465 | localdata.delVar(dep) | ||
| 466 | |||
| 467 | image_cmd = localdata.getVar("IMAGE_CMD") | ||
| 468 | vardeps.add('IMAGE_CMD:' + realt) | ||
| 469 | if image_cmd: | ||
| 470 | cmds.append("\t" + image_cmd) | ||
| 471 | else: | ||
| 472 | bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t) | ||
| 473 | cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}")) | ||
| 474 | |||
| 475 | # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx, | ||
| 476 | # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function. | ||
| 477 | d.delVarFlag('IMAGE_CMD:' + realt, 'func') | ||
| 478 | |||
| 479 | rm_tmp_images = set() | ||
| 480 | def gen_conversion_cmds(bt): | ||
| 481 | for ctype in sorted(ctypes): | ||
| 482 | if bt.endswith("." + ctype): | ||
| 483 | type = bt[0:-len(ctype) - 1] | ||
| 484 | original_type = type | ||
| 485 | if type.startswith("debugfs_"): | ||
| 486 | type = type[8:] | ||
| 487 | # Create input image first. | ||
| 488 | gen_conversion_cmds(type) | ||
| 489 | localdata.setVar('type', type) | ||
| 490 | cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype) | ||
| 491 | if cmd not in cmds: | ||
| 492 | cmds.append(cmd) | ||
| 493 | vardeps.add('CONVERSION_CMD:' + ctype) | ||
| 494 | subimage = type + "." + ctype | ||
| 495 | if subimage not in subimages: | ||
| 496 | subimages.append(subimage) | ||
| 497 | if original_type not in alltypes: | ||
| 498 | rm_tmp_images.add(localdata.expand("${IMAGE_NAME}.${type}")) | ||
| 499 | |||
| 500 | for bt in basetypes[t]: | ||
| 501 | gen_conversion_cmds(bt) | ||
| 502 | |||
| 503 | localdata.setVar('type', realt) | ||
| 504 | if t not in alltypes: | ||
| 505 | rm_tmp_images.add(localdata.expand("${IMAGE_NAME}.${type}")) | ||
| 506 | else: | ||
| 507 | subimages.append(realt) | ||
| 508 | |||
| 509 | # Clean up after applying all conversion commands. Some of them might | ||
| 510 | # use the same input, therefore we cannot delete sooner without applying | ||
| 511 | # some complex dependency analysis. | ||
| 512 | for image in sorted(rm_tmp_images): | ||
| 513 | cmds.append("\trm " + image) | ||
| 514 | |||
| 515 | after = 'do_image' | ||
| 516 | for dep in typedeps[t]: | ||
| 517 | after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_") | ||
| 518 | |||
| 519 | task = "do_image_%s" % t.replace("-", "_").replace(".", "_") | ||
| 520 | |||
| 521 | d.setVar(task, '\n'.join(cmds)) | ||
| 522 | d.setVarFlag(task, 'func', '1') | ||
| 523 | d.setVarFlag(task, 'fakeroot', '1') | ||
| 524 | d.setVarFlag(task, 'imagetype', t) | ||
| 525 | |||
| 526 | d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size') | ||
| 527 | d.prependVarFlag(task, 'postfuncs', 'create_symlinks ') | ||
| 528 | d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages)) | ||
| 529 | d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps)) | ||
| 530 | d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude)) | ||
| 531 | d.appendVarFlag(task, 'postfuncs', ' write_image_output_manifest') | ||
| 532 | |||
| 533 | bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after)) | ||
| 534 | bb.build.addtask(task, 'do_image_complete', after, d) | ||
| 535 | } | ||
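To make the recursion in _image_base_type() concrete, here is a minimal stand-alone sketch (the conversion set and type strings are hypothetical, not the full CONVERSIONTYPES list):

```python
# Minimal re-implementation of the suffix stripping, for illustration.
ctypes = {"gz", "xz", "u-boot", "sha256sum"}

def image_base_type(t):
    for ctype in ctypes:
        if t.endswith("." + ctype):
            # Strip one conversion suffix, then recurse in case the
            # remaining name is itself a converted type.
            return image_base_type(t[:-len("." + ctype)])
    return t

print(image_base_type("ext4.gz.u-boot"))  # -> "ext4"
print(image_base_type("wic"))             # -> "wic"
```

So for an IMAGE_FSTYPES entry of "ext4.gz.u-boot", a single do_image_ext4 task is generated, with the gz and u-boot conversion commands appended after IMAGE_CMD:ext4.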
| 536 | |||
| 537 | # | ||
| 538 | # Compute the rootfs size | ||
| 539 | # | ||
| 540 | def get_rootfs_size(d): | ||
| 541 | import subprocess, oe.utils | ||
| 542 | |||
| 543 | rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT')) | ||
| 544 | overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR')) | ||
| 545 | rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE')) | ||
| 546 | rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE')) | ||
| 547 | rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE') | ||
| 548 | image_fstypes = d.getVar('IMAGE_FSTYPES') or '' | ||
| 549 | initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or '' | ||
| 550 | initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE') | ||
| 551 | |||
| 552 | size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024 | ||
| 553 | |||
| 554 | base_size = size_kb * overhead_factor | ||
| 555 | bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor)) | ||
| 556 | base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space | ||
| 557 | bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space)) | ||
| 558 | |||
| 559 | base_size = base_size2 | ||
| 560 | if base_size != int(base_size): | ||
| 561 | base_size = int(base_size + 1) | ||
| 562 | else: | ||
| 563 | base_size = int(base_size) | ||
| 564 | bb.debug(1, '%f = int(%f)' % (base_size, base_size2)) | ||
| 565 | |||
| 566 | base_size_saved = base_size | ||
| 567 | base_size += rootfs_alignment - 1 | ||
| 568 | base_size -= base_size % rootfs_alignment | ||
| 569 | bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved)) | ||
| 570 | |||
| 571 |     # Do not check the image size of the debugfs image. It is not supposed | ||
| 572 |     # to be deployed, so it doesn't make sense to limit the size | ||
| 573 |     # of the debug filesystem. | ||
| 574 | if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true": | ||
| 575 | bb.debug(1, 'returning debugfs size %d' % (base_size)) | ||
| 576 | return base_size | ||
| 577 | |||
| 578 | # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set) | ||
| 579 | if rootfs_maxsize: | ||
| 580 | rootfs_maxsize_int = int(rootfs_maxsize) | ||
| 581 | if base_size > rootfs_maxsize_int: | ||
| 582 | bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \ | ||
| 583 | (base_size, rootfs_maxsize_int)) | ||
| 584 | |||
| 585 | # Check the initramfs size against INITRAMFS_MAXSIZE (if set) | ||
| 586 | if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize: | ||
| 587 | initramfs_maxsize_int = int(initramfs_maxsize) | ||
| 588 | if base_size > initramfs_maxsize_int: | ||
| 589 | bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \ | ||
| 590 | (base_size, initramfs_maxsize_int)) | ||
| 591 |             bb.error("You can set INITRAMFS_MAXSIZE to a larger value. Usually, it should") | ||
| 592 |             bb.fatal("be less than 1/2 of the RAM size, or the image may fail to boot.\n") | ||
| 593 | |||
| 594 | bb.debug(1, 'returning %d' % (base_size)) | ||
| 595 | return base_size | ||
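As a hypothetical worked example of get_rootfs_size() (all numbers invented for illustration): a 100000 KiB rootfs with IMAGE_OVERHEAD_FACTOR=1.3, IMAGE_ROOTFS_SIZE=65536, IMAGE_ROOTFS_EXTRA_SPACE=0 and IMAGE_ROOTFS_ALIGNMENT=4096 yields:

```python
size_kb = 100000
base_size = size_kb * 1.3              # 130000.0 after the overhead factor
base_size = max(base_size, 65536) + 0  # still 130000.0, extra space is 0
base_size = int(base_size)             # already integral: 130000
base_size += 4096 - 1                  # 134095
base_size -= base_size % 4096          # round up to alignment: 131072
print(base_size)                       # 131072 KiB
```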
| 596 | |||
| 597 | python set_image_size () { | ||
| 598 | rootfs_size = get_rootfs_size(d) | ||
| 599 | d.setVar('ROOTFS_SIZE', str(rootfs_size)) | ||
| 600 | d.setVarFlag('ROOTFS_SIZE', 'export', '1') | ||
| 601 | } | ||
| 602 | |||
| 603 | # | ||
| 604 | # Create symlinks to the newly created image | ||
| 605 | # | ||
| 606 | python create_symlinks() { | ||
| 607 | |||
| 608 | deploy_dir = d.getVar('IMGDEPLOYDIR') | ||
| 609 | img_name = d.getVar('IMAGE_NAME') | ||
| 610 | link_name = d.getVar('IMAGE_LINK_NAME') | ||
| 611 | manifest_name = d.getVar('IMAGE_MANIFEST') | ||
| 612 | taskname = d.getVar("BB_CURRENTTASK") | ||
| 613 | subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split() | ||
| 614 | |||
| 615 | if not link_name: | ||
| 616 | return | ||
| 617 | for type in subimages: | ||
| 618 | dst = os.path.join(deploy_dir, link_name + "." + type) | ||
| 619 | src = img_name + "." + type | ||
| 620 | if os.path.exists(os.path.join(deploy_dir, src)): | ||
| 621 | bb.note("Creating symlink: %s -> %s" % (dst, src)) | ||
| 622 | if os.path.islink(dst): | ||
| 623 | os.remove(dst) | ||
| 624 | os.symlink(src, dst) | ||
| 625 | else: | ||
| 626 | bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src)) | ||
| 627 | } | ||
| 628 | |||
| 629 | python write_image_output_manifest() { | ||
| 630 | import json | ||
| 631 | from pathlib import Path | ||
| 632 | |||
| 633 | taskname = d.getVar("BB_CURRENTTASK") | ||
| 634 | image_deploy_dir = Path(d.getVar('IMGDEPLOYDIR')) | ||
| 635 | image_manifest_dir = Path(d.getVar('IMGMANIFESTDIR')) | ||
| 636 | manifest_path = image_manifest_dir / ("do_" + d.getVar("BB_CURRENTTASK") + ".json") | ||
| 637 | |||
| 638 | image_name = d.getVar("IMAGE_NAME") | ||
| 639 | image_basename = d.getVar("IMAGE_BASENAME") | ||
| 640 | machine = d.getVar("MACHINE") | ||
| 641 | |||
| 642 | subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split() | ||
| 643 | imagetype = d.getVarFlag("do_" + taskname, 'imagetype', False) | ||
| 644 | |||
| 645 | data = { | ||
| 646 | "taskname": taskname, | ||
| 647 | "imagetype": imagetype, | ||
| 648 | "images": [] | ||
| 649 | } | ||
| 650 | |||
| 651 | for type in subimages: | ||
| 652 | image_filename = image_name + "." + type | ||
| 653 | image_path = image_deploy_dir / image_filename | ||
| 654 | if not image_path.exists(): | ||
| 655 | continue | ||
| 656 | data["images"].append({ | ||
| 657 | "filename": image_filename, | ||
| 658 | }) | ||
| 659 | |||
| 660 | with manifest_path.open("w") as f: | ||
| 661 | json.dump([data], f) | ||
| 662 | } | ||
| 663 | |||
| 664 | MULTILIBRE_ALLOW_REP += "${base_bindir} ${base_sbindir} ${bindir} ${sbindir} ${libexecdir} ${sysconfdir} ${nonarch_base_libdir}/udev /lib/modules/[^/]*/modules.*" | ||
| 665 | MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py" | ||
| 666 | MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib" | ||
| 667 | |||
| 668 | PSEUDO_INCLUDE_PATHS .= ",${MULTILIB_TEMP_ROOTFS}" | ||
| 669 | |||
| 670 | do_fetch[noexec] = "1" | ||
| 671 | do_unpack[noexec] = "1" | ||
| 672 | do_patch[noexec] = "1" | ||
| 673 | do_configure[noexec] = "1" | ||
| 674 | do_compile[noexec] = "1" | ||
| 675 | do_install[noexec] = "1" | ||
| 676 | deltask do_populate_lic | ||
| 677 | deltask do_populate_sysroot | ||
| 678 | do_package[noexec] = "1" | ||
| 679 | deltask do_package_qa | ||
| 680 | deltask do_packagedata | ||
| 681 | deltask do_package_write_ipk | ||
| 682 | deltask do_package_write_deb | ||
| 683 | deltask do_package_write_rpm | ||
| 684 | |||
| 685 | create_merged_usr_symlinks_rootfs() { | ||
| 686 | create_merged_usr_symlinks ${IMAGE_ROOTFS} | ||
| 687 | } | ||
| 688 | |||
| 689 | ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs', '',d)}" | ||
| 690 | |||
| 691 | reproducible_final_image_task () { | ||
| 692 | if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then | ||
| 693 | REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true | ||
| 694 | if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then | ||
| 695 | REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}` | ||
| 696 | fi | ||
| 697 | fi | ||
| 698 | # Set mtime of all files to a reproducible value | ||
| 699 | bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS" | ||
| 700 | find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS | ||
| 701 | } | ||
| 702 | |||
| 703 | IMAGE_PREPROCESS_COMMAND:append = " reproducible_final_image_task " | ||
| 704 | |||
| 705 | CVE_PRODUCT = "" | ||
diff --git a/meta/classes-recipe/image_types.bbclass b/meta/classes-recipe/image_types.bbclass deleted file mode 100644 index e6ef0ce11e..0000000000 --- a/meta/classes-recipe/image_types.bbclass +++ /dev/null | |||
| @@ -1,396 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # The default alignment of the rootfs size is 1KiB. In case | ||
| 8 | # you're using the SD card emulation of a QEMU system simulator you may | ||
| 9 | # set this value to 2048 (2MiB alignment). | ||
| 10 | IMAGE_ROOTFS_ALIGNMENT ?= "1" | ||
| 11 | |||
| 12 | def imagetypes_getdepends(d): | ||
| 13 | def adddep(depstr, deps): | ||
| 14 | for d in (depstr or "").split(): | ||
| 15 | # Add task dependency if not already present | ||
| 16 | if ":" not in d: | ||
| 17 | d += ":do_populate_sysroot" | ||
| 18 | deps.add(d) | ||
| 19 | |||
| 20 | # Take a type in the form of foo.bar.car and split it into the items | ||
| 21 | # needed for the image deps "foo", and the conversion deps ["bar", "car"] | ||
| 22 | def split_types(typestring): | ||
| 23 | types = typestring.split(".") | ||
| 24 | return types[0], types[1:] | ||
| 25 | |||
| 26 | fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split()) | ||
| 27 | fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split()) | ||
| 28 | |||
| 29 | deprecated = set() | ||
| 30 | deps = set() | ||
| 31 | for typestring in fstypes: | ||
| 32 | basetype, resttypes = split_types(typestring) | ||
| 33 | |||
| 34 | var = "IMAGE_DEPENDS_%s" % basetype | ||
| 35 | if d.getVar(var) is not None: | ||
| 36 | deprecated.add(var) | ||
| 37 | |||
| 38 | for typedepends in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split(): | ||
| 39 | base, rest = split_types(typedepends) | ||
| 40 | resttypes += rest | ||
| 41 | |||
| 42 | var = "IMAGE_DEPENDS_%s" % base | ||
| 43 | if d.getVar(var) is not None: | ||
| 44 | deprecated.add(var) | ||
| 45 | |||
| 46 | for ctype in resttypes: | ||
| 47 | adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps) | ||
| 48 | adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps) | ||
| 49 | |||
| 50 | if deprecated: | ||
| 51 | bb.fatal('Deprecated variable(s) found: "%s". ' | ||
| 52 | 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated)) | ||
| 53 | |||
| 54 |     # Sort the set so that ordering is consistent | ||
| 55 | return " ".join(sorted(deps)) | ||
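As an illustration of split_types() above, a hypothetical IMAGE_FSTYPES entry "ext4.gz.u-boot" resolves to a base type plus its conversion chain:

```python
# Stand-alone sketch of split_types() on a made-up type string.
def split_types(typestring):
    types = typestring.split(".")
    return types[0], types[1:]

basetype, resttypes = split_types("ext4.gz.u-boot")
print(basetype, resttypes)  # ext4 ['gz', 'u-boot']
# With CONVERSION_DEPENDS_gz = "pigz-native" and CONVERSION_DEPENDS_u-boot =
# "u-boot-tools-native" (defined later in this class), adddep() yields:
#   pigz-native:do_populate_sysroot u-boot-tools-native:do_populate_sysroot
```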
| 56 | |||
| 57 | XZ_COMPRESSION_LEVEL ?= "-6" | ||
| 58 | XZ_INTEGRITY_CHECK ?= "crc32" | ||
| 59 | |||
| 60 | ZIP_COMPRESSION_LEVEL ?= "-9" | ||
| 61 | |||
| 62 | 7ZIP_COMPRESSION_LEVEL ?= "9" | ||
| 63 | 7ZIP_COMPRESSION_METHOD ?= "BZip2" | ||
| 64 | 7ZIP_EXTENSION ?= "7z" | ||
| 65 | |||
| 66 | JFFS2_SUM_EXTRA_ARGS ?= "" | ||
| 67 | IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}.jffs2 ${EXTRA_IMAGECMD}" | ||
| 68 | |||
| 69 | IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.cramfs ${EXTRA_IMAGECMD}" | ||
| 70 | |||
| 71 | oe_mkext234fs () { | ||
| 72 | fstype=$1 | ||
| 73 | extra_imagecmd="" | ||
| 74 | |||
| 75 | if [ $# -gt 1 ]; then | ||
| 76 | shift | ||
| 77 | extra_imagecmd=$@ | ||
| 78 | fi | ||
| 79 | |||
| 80 | 	# If generating an empty image, the sparse block should be large | ||
| 81 | 	# enough to allocate an ext4 filesystem using 4096 bytes per inode; this is | ||
| 82 | 	# about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO) | ||
| 83 | eval local COUNT=\"0\" | ||
| 84 | eval local MIN_COUNT=\"60\" | ||
| 85 | if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then | ||
| 86 | eval COUNT=\"$MIN_COUNT\" | ||
| 87 | fi | ||
| 88 | # Create a sparse image block | ||
| 89 | bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024" | ||
| 90 | dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024 | ||
| 91 | bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`" | ||
| 92 | bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype`" | ||
| 93 | bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype -d ${IMAGE_ROOTFS}" | ||
| 94 | mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype -d ${IMAGE_ROOTFS} | ||
| 95 | 	# Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected) | ||
| 96 | fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype || [ $? -le 3 ] | ||
| 97 | } | ||
| 98 | |||
| 99 | IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}" | ||
| 100 | IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}" | ||
| 101 | IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}" | ||
| 102 | |||
| 103 | MIN_BTRFS_SIZE ?= "16384" | ||
| 104 | IMAGE_CMD:btrfs () { | ||
| 105 | size=${ROOTFS_SIZE} | ||
| 106 | if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then | ||
| 107 | size=${MIN_BTRFS_SIZE} | ||
| 108 | bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K" | ||
| 109 | fi | ||
| 110 | dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.btrfs seek=${size} count=0 bs=1024 | ||
| 111 | mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.btrfs | ||
| 112 | } | ||
| 113 | |||
| 114 | oe_mksquashfs () { | ||
| 115 | local comp=$1; shift | ||
| 116 | local extra_imagecmd="$@" | ||
| 117 | |||
| 118 | if [ "$comp" = "zstd" ]; then | ||
| 119 | suffix="zst" | ||
| 120 | fi | ||
| 121 | |||
| 122 | # Use the bitbake reproducible timestamp instead of the hardcoded squashfs one | ||
| 123 | export SOURCE_DATE_EPOCH=$(stat -c '%Y' ${IMAGE_ROOTFS}) | ||
| 124 | mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.squashfs${comp:+-}${suffix:-$comp} -noappend ${comp:+-comp }$comp $extra_imagecmd | ||
| 125 | } | ||
| 126 | IMAGE_CMD:squashfs = "oe_mksquashfs '' ${EXTRA_IMAGECMD}" | ||
| 127 | IMAGE_CMD:squashfs-xz = "oe_mksquashfs xz ${EXTRA_IMAGECMD}" | ||
| 128 | IMAGE_CMD:squashfs-lzo = "oe_mksquashfs lzo ${EXTRA_IMAGECMD}" | ||
| 129 | IMAGE_CMD:squashfs-lz4 = "oe_mksquashfs lz4 ${EXTRA_IMAGECMD}" | ||
| 130 | IMAGE_CMD:squashfs-zst = "oe_mksquashfs zstd ${EXTRA_IMAGECMD}" | ||
| 131 | |||
| 132 | IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs ${IMAGE_ROOTFS}" | ||
| 133 | IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs-lz4 ${IMAGE_ROOTFS}" | ||
| 134 | IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs-lz4hc ${IMAGE_ROOTFS}" | ||
| 135 | |||
| 136 | # Note that vfat can't handle all types of files that a real Linux file system | ||
| 137 | # can (e.g. device files, symlinks, etc.) and is therefore not suitable for all | ||
| 138 | # use cases. | ||
| 139 | oe_mkvfatfs () { | ||
| 140 | mkfs.vfat $@ -C ${IMGDEPLOYDIR}/${IMAGE_NAME}.vfat ${ROOTFS_SIZE} | ||
| 141 | mcopy -i "${IMGDEPLOYDIR}/${IMAGE_NAME}.vfat" -vsmpQ ${IMAGE_ROOTFS}/* ::/ | ||
| 142 | } | ||
| 143 | |||
| 144 | IMAGE_CMD:vfat = "oe_mkvfatfs ${EXTRA_IMAGECMD}" | ||
| 145 | |||
| 146 | IMAGE_CMD_TAR ?= "tar" | ||
| 147 | # Ignore return code 1 "file changed as we read it", as other tasks (e.g. do_image_wic) may be hardlinking the rootfs | ||
| 148 | IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --pax-option=delete=atime,delete=ctime --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]" | ||
| 149 | SPDX_IMAGE_PURPOSE:tar = "archive" | ||
| 150 | |||
| 151 | do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append" | ||
| 152 | IMAGE_CMD:cpio () { | ||
| 153 | (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}.cpio) | ||
| 154 | # We only need the /init symlink if we're building the real | ||
| 155 | # image. The -dbg image doesn't need it! By being clever | ||
| 156 | # about this we also avoid 'touch' below failing, as it | ||
| 157 | # might be trying to touch /sbin/init on the host since both | ||
| 158 | # the normal and the -dbg image share the same WORKDIR | ||
| 159 | if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then | ||
| 160 | if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then | ||
| 161 | if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then | ||
| 162 | ln -sf /sbin/init ${WORKDIR}/cpio_append/init | ||
| 163 | touch -h -r ${IMAGE_ROOTFS}/sbin/init ${WORKDIR}/cpio_append/init | ||
| 164 | else | ||
| 165 | touch -r ${IMAGE_ROOTFS} ${WORKDIR}/cpio_append/init | ||
| 166 | fi | ||
| 167 | (cd ${WORKDIR}/cpio_append && echo ./init | cpio --reproducible -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}.cpio) | ||
| 168 | fi | ||
| 169 | fi | ||
| 170 | } | ||
| 171 | SPDX_IMAGE_PURPOSE:cpio = "archive" | ||
| 172 | |||
| 173 | UBI_VOLNAME ?= "${MACHINE}-rootfs" | ||
| 174 | UBI_VOLTYPE ?= "dynamic" | ||
| 175 | UBI_IMGTYPE ?= "ubifs" | ||
| 176 | |||
| 177 | write_ubi_config() { | ||
| 178 | local vname="$1" | ||
| 179 | |||
| 180 | cat <<EOF > ubinize${vname}-${IMAGE_NAME}.cfg | ||
| 181 | [ubifs] | ||
| 182 | mode=ubi | ||
| 183 | image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.${UBI_IMGTYPE} | ||
| 184 | vol_id=0 | ||
| 185 | vol_type=${UBI_VOLTYPE} | ||
| 186 | vol_name=${UBI_VOLNAME} | ||
| 187 | vol_flags=autoresize | ||
| 188 | EOF | ||
| 189 | } | ||
| 190 | |||
| 191 | multiubi_mkfs() { | ||
| 192 | local mkubifs_args="$1" | ||
| 193 | local ubinize_args="$2" | ||
| 194 | |||
| 195 |     # Error out early if the arguments required for ubi and ubifs image creation are missing. | ||
| 196 | if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then | ||
| 197 | bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details" | ||
| 198 | fi | ||
| 199 | |||
| 200 | if [ -z "$3" ]; then | ||
| 201 | local vname="" | ||
| 202 | else | ||
| 203 | local vname="_$3" | ||
| 204 | fi | ||
| 205 | write_ubi_config "${vname}" | ||
| 206 | |||
| 207 | if [ -n "$vname" ]; then | ||
| 208 | mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.ubifs ${mkubifs_args} | ||
| 209 | fi | ||
| 210 | ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg | ||
| 211 | |||
| 212 | # Cleanup cfg file | ||
| 213 | mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/ | ||
| 214 | |||
| 215 |     # Create our own symlinks for 'named' volumes | ||
| 216 | if [ -n "$vname" ]; then | ||
| 217 | cd ${IMGDEPLOYDIR} | ||
| 218 | if [ -e ${IMAGE_NAME}${vname}.ubifs ]; then | ||
| 219 | ln -sf ${IMAGE_NAME}${vname}.ubifs \ | ||
| 220 | ${IMAGE_LINK_NAME}${vname}.ubifs | ||
| 221 | fi | ||
| 222 | if [ -e ${IMAGE_NAME}${vname}.ubi ]; then | ||
| 223 | ln -sf ${IMAGE_NAME}${vname}.ubi \ | ||
| 224 | ${IMAGE_LINK_NAME}${vname}.ubi | ||
| 225 | fi | ||
| 226 | cd - | ||
| 227 | fi | ||
| 228 | } | ||
| 229 | |||
| 230 | MULTIUBI_ARGS = "MKUBIFS_ARGS UBINIZE_ARGS" | ||
| 231 | |||
| 232 | IMAGE_CMD:multiubi () { | ||
| 233 | ${@' '.join(['%s_%s="%s";' % (arg, name, d.getVar('%s_%s' % (arg, name))) for arg in d.getVar('MULTIUBI_ARGS').split() for name in d.getVar('MULTIUBI_BUILD').split()])} | ||
| 234 | # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name> | ||
| 235 | for name in ${MULTIUBI_BUILD}; do | ||
| 236 | eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\" | ||
| 237 | eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\" | ||
| 238 | |||
| 239 | multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}" | ||
| 240 | done | ||
| 241 | } | ||
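The inline Python expression at the top of IMAGE_CMD:multiubi expands, at parse time, into one shell assignment per (argument, volume) pair, which the loop below then reads back with eval. A rough sketch of what that expansion produces, assuming a hypothetical MULTIUBI_BUILD = "boot data" with made-up argument values:

```python
# Stand-alone illustration of the parse-time expansion; values are invented.
args = "MKUBIFS_ARGS UBINIZE_ARGS".split()
names = "boot data".split()
value = lambda arg, name: "..."  # stands in for d.getVar('%s_%s' % (arg, name))
print(' '.join('%s_%s="%s";' % (arg, name, value(arg, name))
               for arg in args for name in names))
# -> MKUBIFS_ARGS_boot="..."; MKUBIFS_ARGS_data="..."; UBINIZE_ARGS_boot="..."; ...
```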
| 242 | |||
| 243 | IMAGE_CMD:ubi () { | ||
| 244 | multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}" | ||
| 245 | } | ||
| 246 | IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}" | ||
| 247 | |||
| 248 | IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.ubifs ${MKUBIFS_ARGS}" | ||
| 249 | |||
| 250 | MIN_F2FS_SIZE ?= "524288" | ||
| 251 | IMAGE_CMD:f2fs () { | ||
| 252 | 	# We need to add additional smarts here for devices smaller than 1.5G. | ||
| 253 | 	# We need to scale appropriately between 40M -> 1.5G, as the "overprovision | ||
| 254 | 	# ratio" goes down as the device gets bigger (70% -> 4.5%). Below about | ||
| 255 | 	# 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so add additional | ||
| 256 | 	# space here when under 500M. | ||
| 257 | size=${ROOTFS_SIZE} | ||
| 258 | if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then | ||
| 259 | size=${MIN_F2FS_SIZE} | ||
| 260 | bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K" | ||
| 261 | fi | ||
| 262 | dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs seek=${size} count=0 bs=1024 | ||
| 263 | mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs | ||
| 264 | sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs | ||
| 265 | } | ||
| 266 | |||
| 267 | EXTRA_IMAGECMD = "" | ||
| 268 | |||
| 269 | inherit siteinfo kernel-arch image-artifact-names | ||
| 270 | |||
| 271 | JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}" | ||
| 272 | JFFS2_ERASEBLOCK ?= "0x40000" | ||
| 273 | EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers" | ||
| 274 | |||
| 275 | # Change these if you want the default mkfs behavior (i.e. create a minimal number of inodes) | ||
| 276 | EXTRA_IMAGECMD:ext2 ?= "-i 4096" | ||
| 277 | EXTRA_IMAGECMD:ext3 ?= "-i 4096" | ||
| 278 | EXTRA_IMAGECMD:ext4 ?= "-i 4096" | ||
| 279 | EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink" | ||
| 280 | EXTRA_IMAGECMD:f2fs ?= "" | ||
| 281 | |||
| 282 | # If a specific FAT size is needed, set it here (e.g. "-F 32"/"-F 16"/"-F 12") | ||
| 283 | # otherwise mkfs.vfat will automatically pick one. | ||
| 284 | EXTRA_IMAGECMD:vfat ?= "" | ||
| 285 | |||
| 286 | do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot" | ||
| 287 | do_image_cpio[depends] += "cpio-native:do_populate_sysroot" | ||
| 288 | do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot" | ||
| 289 | do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot" | ||
| 290 | do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot" | ||
| 291 | do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot" | ||
| 292 | do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot" | ||
| 293 | do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot" | ||
| 294 | do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot" | ||
| 295 | do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot" | ||
| 296 | do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot" | ||
| 297 | do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot" | ||
| 298 | do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot" | ||
| 299 | do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot" | ||
| 300 | do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot" | ||
| 301 | do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot" | ||
| 302 | do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot" | ||
| 303 | do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot" | ||
| 304 | do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot" | ||
| 305 | do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot" | ||
| 306 | do_image_vfat[depends] += "dosfstools-native:do_populate_sysroot mtools-native:do_populate_sysroot" | ||
| 307 | |||
| 308 | # This variable lists the values that are suitable for IMAGE_FSTYPES | ||
| 309 | IMAGE_TYPES = " \ | ||
| 310 | jffs2 jffs2.sum \ | ||
| 311 | cramfs \ | ||
| 312 | ext2 ext2.gz ext2.bz2 ext2.lzma \ | ||
| 313 | ext3 ext3.gz \ | ||
| 314 | ext4 ext4.gz \ | ||
| 315 | btrfs \ | ||
| 316 | vfat \ | ||
| 317 | squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \ | ||
| 318 | ubi ubifs multiubi \ | ||
| 319 | tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \ | ||
| 320 | cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \ | ||
| 321 | wic wic.gz wic.bz2 wic.lzma wic.zst \ | ||
| 322 | container \ | ||
| 323 | f2fs \ | ||
| 324 | erofs erofs-lz4 erofs-lz4hc \ | ||
| 325 | " | ||
| 326 | # These image types are x86 specific as they need syslinux | ||
| 327 | IMAGE_TYPES:append:x86 = " hddimg iso" | ||
| 328 | IMAGE_TYPES:append:x86-64 = " hddimg iso" | ||
| 329 | |||
| 330 | # Compression is a special case of conversion. The old variable | ||
| 331 | # names are still supported for backward-compatibility. When defining | ||
| 332 | # new compression or conversion commands, use CONVERSIONTYPES and | ||
| 333 | # CONVERSION_CMD/DEPENDS. | ||
| 334 | COMPRESSIONTYPES ?= "" | ||
| 335 | |||
| 336 | CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip 7zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}" | ||
| 337 | CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}.${type}" | ||
| 338 | CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.gz" | ||
| 339 | CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}.${type}" | ||
| 340 | CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.xz" | ||
| 341 | CONVERSION_CMD:lz4 = "lz4 -f -9 -z -l ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.lz4" | ||
| 342 | CONVERSION_CMD:lzo = "lzop -f -9 ${IMAGE_NAME}.${type}" | ||
| 343 | CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}.${type}.zip ${IMAGE_NAME}.${type}" | ||
| 344 | CONVERSION_CMD:7zip = "7za a -mx=${7ZIP_COMPRESSION_LEVEL} -mm=${7ZIP_COMPRESSION_METHOD} ${IMAGE_NAME}.${type}.${7ZIP_EXTENSION} ${IMAGE_NAME}.${type}" | ||
| 345 | CONVERSION_CMD:zst = "zstd -f -k -c ${ZSTD_DEFAULTS} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.zst" | ||
| 346 | CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}.${type} -o ${IMAGE_NAME}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}" | ||
| 347 | CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.md5sum" | ||
| 348 | CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha1sum" | ||
| 349 | CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha224sum" | ||
| 350 | CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha256sum" | ||
| 351 | CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha384sum" | ||
| 352 | CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha512sum" | ||
| 353 | CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}.${type} -o ${IMAGE_NAME}.${type}.bmap" | ||
| 354 | CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.u-boot" | ||
| 355 | CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vmdk" | ||
| 356 | CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vhdx" | ||
| 357 | CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vhd" | ||
| 358 | CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vdi" | ||
| 359 | CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.qcow2" | ||
| 360 | CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.base64" | ||
| 361 | CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}.${type}" | ||
| 362 | CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}.${type}" | ||
| 363 | CONVERSION_DEPENDS_lzma = "xz-native" | ||
| 364 | CONVERSION_DEPENDS_gz = "pigz-native" | ||
| 365 | CONVERSION_DEPENDS_bz2 = "pbzip2-native" | ||
| 366 | CONVERSION_DEPENDS_xz = "xz-native" | ||
| 367 | CONVERSION_DEPENDS_lz4 = "lz4-native" | ||
| 368 | CONVERSION_DEPENDS_lzo = "lzop-native" | ||
| 369 | CONVERSION_DEPENDS_zip = "zip-native" | ||
| 370 | CONVERSION_DEPENDS_7zip = "7zip-native" | ||
| 371 | CONVERSION_DEPENDS_zst = "zstd-native" | ||
| 372 | CONVERSION_DEPENDS_sum = "mtd-utils-native" | ||
| 373 | CONVERSION_DEPENDS_bmap = "bmaptool-native" | ||
| 374 | CONVERSION_DEPENDS_u-boot = "u-boot-tools-native" | ||
| 375 | CONVERSION_DEPENDS_vmdk = "qemu-system-native" | ||
| 376 | CONVERSION_DEPENDS_vdi = "qemu-system-native" | ||
| 377 | CONVERSION_DEPENDS_qcow2 = "qemu-system-native" | ||
| 378 | CONVERSION_DEPENDS_base64 = "coreutils-native" | ||
| 379 | CONVERSION_DEPENDS_vhdx = "qemu-system-native" | ||
| 380 | CONVERSION_DEPENDS_vhd = "qemu-system-native" | ||
| 381 | CONVERSION_DEPENDS_zsync = "zsync-curl-native" | ||
| 382 | CONVERSION_DEPENDS_gzsync = "zsync-curl-native" | ||
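Defining a new conversion means extending CONVERSIONTYPES and supplying a matching command and native build dependency, as the comment above describes. A hypothetical sketch (the "lrz" type and lrzip-native are placeholders, not part of this class; lrzip appends .lrz to its input):

    CONVERSIONTYPES:append = " lrz"
    CONVERSION_CMD:lrz = "lrzip -f ${IMAGE_NAME}.${type}"
    CONVERSION_DEPENDS_lrz = "lrzip-native"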
| 383 | |||
| 384 | RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4" | ||
| 385 | RUNNABLE_MACHINE_PATTERNS ?= "qemu" | ||
| 386 | |||
| 387 | DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso" | ||
| 388 | |||
| 389 | # The IMAGE_TYPES_MASKED variable is used to mask out from IMAGE_FSTYPES any | ||
| 390 | # image types that are not built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc. | ||
| 391 | IMAGE_TYPES_MASKED ?= "" | ||
| 392 | |||
| 393 | # bmap requires python3 to be in the PATH | ||
| 394 | EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}" | ||
| 395 | # reproducible tar requires our tar, not the host's | ||
| 396 | EXTRANATIVEPATH += "${@'tar-native' if 'tar' in d.getVar('IMAGE_FSTYPES') else ''}" | ||
diff --git a/meta/classes-recipe/image_types_wic.bbclass b/meta/classes-recipe/image_types_wic.bbclass deleted file mode 100644 index 675aa97513..0000000000 --- a/meta/classes-recipe/image_types_wic.bbclass +++ /dev/null | |||
| @@ -1,221 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # The WICVARS variable defines the list of bitbake variables used by the wic code. | ||
| 8 | # Variables from this list are written to the <image>.env file. | ||
| 9 | WICVARS ?= "\ | ||
| 10 | APPEND \ | ||
| 11 | ASSUME_PROVIDED \ | ||
| 12 | BBLAYERS \ | ||
| 13 | DEPLOY_DIR_IMAGE \ | ||
| 14 | FAKEROOTCMD \ | ||
| 15 | HOSTTOOLS_DIR \ | ||
| 16 | IMAGE_BASENAME \ | ||
| 17 | IMAGE_BOOT_FILES \ | ||
| 18 | IMAGE_CLASSES \ | ||
| 19 | IMAGE_EFI_BOOT_FILES \ | ||
| 20 | IMAGE_EXTRA_PARTITION_FILES \ | ||
| 21 | IMAGE_LINK_NAME \ | ||
| 22 | IMAGE_ROOTFS \ | ||
| 23 | IMGDEPLOYDIR \ | ||
| 24 | INITRAMFS_FSTYPES \ | ||
| 25 | INITRAMFS_IMAGE \ | ||
| 26 | INITRAMFS_IMAGE_BUNDLE \ | ||
| 27 | INITRAMFS_LINK_NAME \ | ||
| 28 | INITRD \ | ||
| 29 | INITRD_LIVE \ | ||
| 30 | ISODIR \ | ||
| 31 | KERNEL_CONSOLE \ | ||
| 32 | KERNEL_IMAGETYPE \ | ||
| 33 | MACHINE \ | ||
| 34 | PSEUDO_INCLUDE_PATHS \ | ||
| 35 | RECIPE_SYSROOT_NATIVE \ | ||
| 36 | ROOTFS_SIZE \ | ||
| 37 | STAGING_DATADIR \ | ||
| 38 | STAGING_DIR \ | ||
| 39 | STAGING_DIR_HOST \ | ||
| 40 | STAGING_LIBDIR \ | ||
| 41 | TARGET_SYS \ | ||
| 42 | " | ||
| 43 | |||
| 44 | inherit_defer ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)} | ||
| 45 | |||
| 46 | WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks" | ||
| 47 | WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks" | ||
| 48 | WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}" | ||
| 49 | WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}" | ||
| 50 | |||
| 51 | def wks_search(files, search_path): | ||
| 52 | for f in files: | ||
| 53 | if os.path.isabs(f): | ||
| 54 | if os.path.exists(f): | ||
| 55 | return f | ||
| 56 | else: | ||
| 57 | searched = bb.utils.which(search_path, f) | ||
| 58 | if searched: | ||
| 59 | return searched | ||
| 60 | |||
| 61 | def wks_checksums(files, search_path): | ||
| 62 | ret = "" | ||
| 63 | for f in files: | ||
| 64 | found, hist = bb.utils.which(search_path, f, history=True) | ||
| 65 | ret = ret + " " + " ".join(h + ":False" for h in hist[:-1]) | ||
| 66 | if found: | ||
| 67 | ret = ret + " " + found + ":True" | ||
| 68 | return ret | ||
| 69 | |||
| 70 | |||
| 71 | WIC_CREATE_EXTRA_ARGS ?= "" | ||
| 72 | |||
| 73 | IMAGE_CMD:wic () { | ||
| 74 | out="${IMGDEPLOYDIR}/${IMAGE_NAME}" | ||
| 75 | build_wic="${WORKDIR}/build-wic" | ||
| 76 | tmp_wic="${WORKDIR}/tmp-wic" | ||
| 77 | wks="${WKS_FULL_PATH}" | ||
| 78 | if [ -e "$tmp_wic" ]; then | ||
| 79 | # Ensure we don't have any junk leftover from a previously interrupted | ||
| 80 | # do_image_wic execution | ||
| 81 | rm -rf "$tmp_wic" | ||
| 82 | fi | ||
| 83 | if [ -z "$wks" ]; then | ||
| 84 | bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately." | ||
| 85 | fi | ||
| 86 | BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create --debug "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS} | ||
| 87 | |||
| 88 | # look to see if the user specifies a custom imager | ||
| 89 | IMAGER=direct | ||
| 90 | eval set -- "${WIC_CREATE_EXTRA_ARGS} --" | ||
| 91 | while [ 1 ]; do | ||
| 92 | case "$1" in | ||
| 93 | --imager|-i) | ||
| 94 | shift | ||
| 95 | IMAGER=$1 | ||
| 96 | ;; | ||
| 97 | --) | ||
| 98 | shift | ||
| 99 | break | ||
| 100 | ;; | ||
| 101 | esac | ||
| 102 | shift | ||
| 103 | done | ||
| 104 | mv "$build_wic/$(basename "${wks%.wks}")"*.${IMAGER} "$out.wic" | ||
| 105 | } | ||
| 106 | IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR" | ||
| 107 | SPDX_IMAGE_PURPOSE:wic = "diskImage" | ||
| 108 | do_image_wic[cleandirs] = "${WORKDIR}/build-wic" | ||
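WIC_CREATE_EXTRA_ARGS is passed straight through to the wic tool above, so a recipe can forward any option wic accepts, for instance:

    WIC_CREATE_EXTRA_ARGS += "--no-fstab-update"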
| 109 | |||
| 110 | # Rebuild when the wks file or vars in WICVARS change | ||
| 111 | USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}" | ||
| 112 | WKS_FILE_CHECKSUM = "${@wks_checksums(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) if '${USING_WIC}' else ''}" | ||
| 113 | do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}" | ||
| 114 | do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}" | ||
| 115 | |||
| 116 | # We ensure all artifacts are deployed (e.g. virtual/bootloader) | ||
| 117 | do_image_wic[recrdeptask] += "do_deploy" | ||
| 118 | do_image_wic[deptask] += "do_image_complete" | ||
| 119 | |||
| 120 | WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}' | ||
| 121 | WKS_FILE_DEPENDS_DEFAULT += "bmaptool-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native" | ||
| 122 | # Unified kernel images need objcopy | ||
| 123 | WKS_FILE_DEPENDS_DEFAULT += "virtual/cross-binutils" | ||
| 124 | WKS_FILE_DEPENDS_BOOTLOADERS = "" | ||
| 125 | WKS_FILE_DEPENDS_BOOTLOADERS:aarch64 = "grub-efi systemd-boot" | ||
| 126 | WKS_FILE_DEPENDS_BOOTLOADERS:arm = "systemd-boot" | ||
| 127 | WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot" | ||
| 128 | WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot" | ||
| 129 | WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi" | ||
| 130 | |||
| 131 | WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}" | ||
| 132 | |||
| 133 | DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }" | ||
| 134 | |||
| 135 | python do_write_wks_template () { | ||
| 136 | """Write out expanded template contents to WKS_FULL_PATH.""" | ||
| 137 | import re | ||
| 138 | |||
| 139 | template_body = d.getVar('_WKS_TEMPLATE') | ||
| 140 | |||
| 141 | # Remove any remnant variable references left behind by the expansion | ||
| 142 | # due to undefined variables | ||
| 143 | expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}") | ||
| 144 | while True: | ||
| 145 | new_body = re.sub(expand_var_regexp, '', template_body) | ||
| 146 | if new_body == template_body: | ||
| 147 | break | ||
| 148 | else: | ||
| 149 | template_body = new_body | ||
| 150 | |||
| 151 | wks_file = d.getVar('WKS_FULL_PATH') | ||
| 152 | with open(wks_file, 'w') as f: | ||
| 153 | f.write(template_body) | ||
| 155 | # Copy the finalized wks file to the deploy directory for later use | ||
| 156 | depdir = d.getVar('IMGDEPLOYDIR') | ||
| 157 | basename = d.getVar('IMAGE_BASENAME') | ||
| 158 | bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file))) | ||
| 159 | } | ||
| 160 | |||
| 161 | do_flush_pseudodb() { | ||
| 162 | ${FAKEROOTENV} ${FAKEROOTCMD} -S | ||
| 163 | } | ||
| 164 | |||
| 165 | python () { | ||
| 166 | if d.getVar('USING_WIC'): | ||
| 167 | wks_file_u = d.getVar('WKS_FULL_PATH', False) | ||
| 168 | wks_file = d.expand(wks_file_u) | ||
| 169 | base, ext = os.path.splitext(wks_file) | ||
| 170 | if ext == '.in' and os.path.exists(wks_file): | ||
| 171 | wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base)) | ||
| 172 | d.setVar('WKS_FULL_PATH', wks_out_file) | ||
| 173 | d.setVar('WKS_TEMPLATE_PATH', wks_file_u) | ||
| 174 | d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True') | ||
| 175 | |||
| 176 | # We need to re-parse each time the file changes, and bitbake | ||
| 177 | # needs to be told about that explicitly. | ||
| 178 | bb.parse.mark_dependency(d, wks_file) | ||
| 179 | |||
| 180 | try: | ||
| 181 | with open(wks_file, 'r') as f: | ||
| 182 | body = f.read() | ||
| 183 | except (IOError, OSError): | ||
| 184 | pass | ||
| 185 | else: | ||
| 186 | # Previously, I used expandWithRefs to get the dependency list | ||
| 187 | # and add it to WICVARS, but there's no point re-parsing the | ||
| 188 | # file in process_wks_template as well, so just put it in | ||
| 189 | # a variable and let the metadata deal with the deps. | ||
| 190 | d.setVar('_WKS_TEMPLATE', body) | ||
| 191 | bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d) | ||
| 192 | bb.build.addtask('do_image_wic', 'do_image_complete', None, d) | ||
| 193 | } | ||
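The anonymous python above means a machine can ship a .wks.in template whose ${} references are expanded at parse time. A minimal illustrative template (the partition layout is hypothetical; bootimg-pcbios and rootfs are real wic source plugins):

    part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
    part / --source rootfs --ondisk sda --fstype=ext4 --label root --align 1024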
| 194 | |||
| 195 | # | ||
| 196 | # Write environment variables used by wic | ||
| 197 | # to tmp/sysroots/<machine>/imgdata/<image>.env | ||
| 198 | # | ||
| 199 | python do_rootfs_wicenv () { | ||
| 200 | wicvars = d.getVar('WICVARS') | ||
| 201 | if not wicvars: | ||
| 202 | return | ||
| 203 | |||
| 204 | stdir = d.getVar('STAGING_DIR') | ||
| 205 | outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata') | ||
| 206 | bb.utils.mkdirhier(outdir) | ||
| 207 | basename = d.getVar('IMAGE_BASENAME') | ||
| 208 | with open(os.path.join(outdir, basename) + '.env', 'w') as envf: | ||
| 209 | for var in wicvars.split(): | ||
| 210 | value = d.getVar(var) | ||
| 211 | if value: | ||
| 212 | envf.write('%s="%s"\n' % (var, value.strip())) | ||
| 214 | # Copy the .env file to the deploy directory for later use with standalone wic | ||
| 215 | depdir = d.getVar('IMGDEPLOYDIR') | ||
| 216 | bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env') | ||
| 217 | } | ||
| 218 | addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa | ||
| 219 | addtask do_rootfs_wicenv after do_image before do_image_wic | ||
| 220 | do_rootfs_wicenv[vardeps] += "${WICVARS}" | ||
| 221 | do_rootfs_wicenv[prefuncs] = 'set_image_size' | ||
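The deployed .env file is what lets wic be run standalone against an existing build, roughly (wks and image names illustrative):

    wic create mymachine.wks -e core-image-minimal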
diff --git a/meta/classes-recipe/kernel-arch.bbclass b/meta/classes-recipe/kernel-arch.bbclass deleted file mode 100644 index 7aea9cd3e8..0000000000 --- a/meta/classes-recipe/kernel-arch.bbclass +++ /dev/null | |||
| @@ -1,84 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # Set the ARCH environment variable for kernel compilation (including | ||
| 9 | # modules). The return value must match one of the architecture directories | ||
| 10 | # in the kernel source "arch" directory | ||
| 11 | # | ||
| 12 | |||
| 13 | valid_archs = "alpha cris ia64 \ | ||
| 14 | i386 x86 \ | ||
| 15 | m68knommu m68k ppc powerpc powerpc64 ppc64 \ | ||
| 16 | sparc sparc64 \ | ||
| 17 | arm aarch64 \ | ||
| 18 | m32r mips \ | ||
| 19 | sh sh64 um h8300 \ | ||
| 20 | parisc s390 v850 \ | ||
| 21 | avr32 blackfin \ | ||
| 22 | loongarch64 \ | ||
| 23 | microblaze \ | ||
| 24 | nios2 arc riscv xtensa" | ||
| 25 | |||
| 26 | def map_kernel_arch(a, d): | ||
| 27 | import re | ||
| 28 | |||
| 29 | valid_archs = d.getVar('valid_archs').split() | ||
| 30 | |||
| 31 | if re.match('(i.86|athlon|x86.64)$', a): return 'x86' | ||
| 32 | elif re.match('arceb$', a): return 'arc' | ||
| 33 | elif re.match('armeb$', a): return 'arm' | ||
| 34 | elif re.match('aarch64$', a): return 'arm64' | ||
| 35 | elif re.match('aarch64_be$', a): return 'arm64' | ||
| 36 | elif re.match('aarch64_ilp32$', a): return 'arm64' | ||
| 37 | elif re.match('aarch64_be_ilp32$', a): return 'arm64' | ||
| 38 | elif re.match('loongarch(32|64|)$', a): return 'loongarch' | ||
| 39 | elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips' | ||
| 40 | elif re.match('mcf', a): return 'm68k' | ||
| 41 | elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv' | ||
| 42 | elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc' | ||
| 43 | elif re.match('sh(3|4)$', a): return 'sh' | ||
| 44 | elif re.match('bfin', a): return 'blackfin' | ||
| 45 | elif re.match('microblazee[bl]', a): return 'microblaze' | ||
| 46 | elif a in valid_archs: return a | ||
| 47 | else: | ||
| 48 | if not d.getVar("TARGET_OS").startswith("linux"): | ||
| 49 | return a | ||
| 50 | bb.error("cannot map '%s' to a linux kernel architecture" % a) | ||
| 51 | |||
| 52 | export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}" | ||
| 53 | |||
| 54 | def map_uboot_arch(a, d): | ||
| 55 | import re | ||
| 56 | |||
| 57 | if re.match('p(pc|owerpc)(|64)', a): return 'ppc' | ||
| 58 | elif re.match('i.86$', a): return 'x86' | ||
| 59 | return a | ||
| 60 | |||
| 61 | export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}" | ||
| 62 | |||
| 63 | # Set TARGET_??_KERNEL_ARCH in the machine .conf to provide the | ||
| 64 | # architecture-specific options necessary for building the kernel and modules. | ||
| 65 | TARGET_CC_KERNEL_ARCH ?= "" | ||
| 66 | HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}" | ||
| 67 | TARGET_LD_KERNEL_ARCH ?= "" | ||
| 68 | HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}" | ||
| 69 | TARGET_AR_KERNEL_ARCH ?= "" | ||
| 70 | HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}" | ||
| 71 | TARGET_OBJCOPY_KERNEL_ARCH ?= "" | ||
| 72 | HOST_OBJCOPY_KERNEL_ARCH ?= "${TARGET_OBJCOPY_KERNEL_ARCH}" | ||
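A machine .conf would set the TARGET_* variants, for example for a big-endian ARM build (flags illustrative):

    TARGET_CC_KERNEL_ARCH = "-mbig-endian"
    TARGET_LD_KERNEL_ARCH = "-EB"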
| 73 | |||
| 74 | KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} \ | ||
| 75 | -fuse-ld=bfd ${DEBUG_PREFIX_MAP} \ | ||
| 76 | -ffile-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} \ | ||
| 77 | -ffile-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH} \ | ||
| 78 | " | ||
| 79 | KERNEL_LD = "${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}" | ||
| 80 | KERNEL_AR = "${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}" | ||
| 81 | KERNEL_OBJCOPY = "${HOST_PREFIX}objcopy ${HOST_OBJCOPY_KERNEL_ARCH}" | ||
| 82 | # Code in package.py can't handle options on KERNEL_STRIP | ||
| 83 | KERNEL_STRIP = "${HOST_PREFIX}strip" | ||
| 84 | TOOLCHAIN = "gcc" | ||
diff --git a/meta/classes-recipe/kernel-artifact-names.bbclass b/meta/classes-recipe/kernel-artifact-names.bbclass deleted file mode 100644 index 1a7611a15e..0000000000 --- a/meta/classes-recipe/kernel-artifact-names.bbclass +++ /dev/null | |||
| @@ -1,37 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | ################################################################## | ||
| 8 | # Specific kernel creation info | ||
| 9 | # for recipes/bbclasses which need to reuse some of the kernel | ||
| 10 | # artifacts, but aren't kernel recipes themselves | ||
| 11 | ################################################################## | ||
| 12 | |||
| 13 | inherit image-artifact-names | ||
| 14 | |||
| 15 | KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}${IMAGE_MACHINE_SUFFIX}${IMAGE_VERSION_SUFFIX}" | ||
| 16 | KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" | ||
| 17 | KERNEL_ARTIFACT_BIN_EXT ?= ".bin" | ||
| 18 | |||
| 19 | KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}" | ||
| 20 | KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" | ||
| 21 | KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}" | ||
| 22 | KERNEL_IMAGETYPE_SYMLINK ?= "1" | ||
| 23 | |||
| 24 | KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}" | ||
| 25 | KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" | ||
| 26 | KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}" | ||
| 27 | |||
| 28 | KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}" | ||
| 29 | KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" | ||
| 30 | KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}" | ||
| 31 | |||
| 32 | MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}" | ||
| 33 | MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" | ||
| 34 | MODULE_TARBALL_DEPLOY ?= "1" | ||
| 35 | |||
| 36 | INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}" | ||
| 37 | INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}" | ||
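With the defaults above, artifact names embed PKGE/PKGV/PKGR plus the machine and version suffixes, so a deployed kernel image might be named something like (all values illustrative):

    bzImage-6.6.21+git0+abc123-r0-qemux86-64-20240301123456.bin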
diff --git a/meta/classes-recipe/kernel-devicetree.bbclass b/meta/classes-recipe/kernel-devicetree.bbclass deleted file mode 100644 index eff052b402..0000000000 --- a/meta/classes-recipe/kernel-devicetree.bbclass +++ /dev/null | |||
| @@ -1,139 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Support for device tree generation | ||
| 8 | python () { | ||
| 9 | if not bb.data.inherits_class('nopackages', d): | ||
| 10 | d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree") | ||
| 11 | if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1': | ||
| 12 | d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle") | ||
| 13 | } | ||
| 14 | |||
| 15 | # Recursively search for devicetree files | ||
| 16 | FILES:${KERNEL_PACKAGE_NAME}-devicetree = " \ | ||
| 17 | /${KERNEL_DTBDEST}/**/*.dtb \ | ||
| 18 | /${KERNEL_DTBDEST}/**/*.dtbo \ | ||
| 19 | " | ||
| 20 | |||
| 21 | FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin" | ||
| 22 | |||
| 23 | # Generate kernel+devicetree bundle | ||
| 24 | KERNEL_DEVICETREE_BUNDLE ?= "0" | ||
| 25 | |||
| 26 | # dtc flags passed via DTC_FLAGS env variable | ||
| 27 | KERNEL_DTC_FLAGS ?= "" | ||
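A BSP typically drives this class from its machine configuration, e.g. (dtb name illustrative):

    KERNEL_DEVICETREE = "am335x-boneblack.dtb"
    KERNEL_DEVICETREE_BUNDLE = "1"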
| 28 | |||
| 29 | normalize_dtb () { | ||
| 30 | dtb="$1" | ||
| 31 | if echo $dtb | grep -q '/dts/'; then | ||
| 32 | bbwarn "$dtb contains the full path to the the dts file, but only the dtb name should be used." | ||
| 33 | dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'` | ||
| 34 | fi | ||
| 35 | echo "$dtb" | ||
| 36 | } | ||
| 37 | |||
| 38 | get_real_dtb_path_in_kernel () { | ||
| 39 | dtb="$1" | ||
| 40 | dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb" | ||
| 41 | if [ ! -e "$dtb_path" ]; then | ||
| 42 | dtb_path="${B}/arch/${ARCH}/boot/$dtb" | ||
| 43 | fi | ||
| 44 | echo "$dtb_path" | ||
| 45 | } | ||
| 46 | |||
| 47 | do_configure:append() { | ||
| 48 | if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then | ||
| 49 | if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then | ||
| 50 | case "${ARCH}" in | ||
| 51 | "arm") | ||
| 52 | config="${B}/.config" | ||
| 53 | if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then | ||
| 54 | bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!' | ||
| 55 | sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config | ||
| 56 | echo "CONFIG_ARM_APPENDED_DTB=y" >> $config | ||
| 57 | echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config | ||
| 58 | fi | ||
| 59 | ;; | ||
| 60 | *) | ||
| 61 | bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'." | ||
| 62 | esac | ||
| 63 | else | ||
| 64 | bberror 'KERNEL_DEVICETREE_BUNDLE requires KERNEL_IMAGETYPE to contain zImage.' | ||
| 65 | fi | ||
| 66 | fi | ||
| 67 | } | ||
| 68 | |||
| 69 | do_compile:append() { | ||
| 70 | if [ -n "${KERNEL_DTC_FLAGS}" ]; then | ||
| 71 | export DTC_FLAGS="${KERNEL_DTC_FLAGS}" | ||
| 72 | fi | ||
| 73 | |||
| 74 | for dtbf in ${KERNEL_DEVICETREE}; do | ||
| 75 | dtb=`normalize_dtb "$dtbf"` | ||
| 76 | oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}" ${KERNEL_EXTRA_ARGS} | ||
| 77 | done | ||
| 78 | } | ||
| 79 | |||
| 80 | do_install:append() { | ||
| 81 | install -d ${D}/${KERNEL_DTBDEST} | ||
| 82 | for dtbf in ${KERNEL_DEVICETREE}; do | ||
| 83 | dtb=`normalize_dtb "$dtbf"` | ||
| 84 | dtb_path=`get_real_dtb_path_in_kernel "$dtb"` | ||
| 85 | if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then | ||
| 86 | dtb_ext=${dtb##*.} | ||
| 87 | dtb_base_name=`basename $dtb .$dtb_ext` | ||
| 88 | dtb=$dtb_base_name.$dtb_ext | ||
| 89 | fi | ||
| 90 | install -Dm 0644 $dtb_path ${D}/${KERNEL_DTBDEST}/$dtb | ||
| 91 | done | ||
| 92 | } | ||
| 93 | |||
| 94 | do_deploy:append() { | ||
| 95 | for dtbf in ${KERNEL_DEVICETREE}; do | ||
| 96 | dtb=`normalize_dtb "$dtbf"` | ||
| 97 | dtb_ext=${dtb##*.} | ||
| 98 | dtb_base_name=`basename $dtb .$dtb_ext` | ||
| 99 | install -d $deployDir | ||
| 100 | if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then | ||
| 101 | dtb=$dtb_base_name.$dtb_ext | ||
| 102 | fi | ||
| 103 | install -m 0644 ${D}/${KERNEL_DTBDEST}/$dtb $deployDir/$dtb_base_name.$dtb_ext | ||
| 104 | if [ -n "${KERNEL_DTB_NAME}" ] ; then | ||
| 105 | ln -sf $dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext | ||
| 106 | fi | ||
| 107 | if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then | ||
| 108 | ln -sf $dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext | ||
| 109 | fi | ||
| 110 | for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do | ||
| 111 | if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then | ||
| 112 | cat ${D}/${KERNEL_IMAGEDEST}/$type \ | ||
| 113 | $deployDir/$dtb_base_name.$dtb_ext \ | ||
| 114 | > $deployDir/$type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} | ||
| 115 | if [ -n "${KERNEL_DTB_NAME}" ]; then | ||
| 116 | ln -sf $type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \ | ||
| 117 | $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} | ||
| 118 | fi | ||
| 119 | if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then | ||
| 120 | ln -sf $type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \ | ||
| 121 | $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} | ||
| 122 | fi | ||
| 123 | if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then | ||
| 124 | cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \ | ||
| 125 | $deployDir/$dtb_base_name.$dtb_ext \ | ||
| 126 | > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} | ||
| 127 | if [ -n "${KERNEL_DTB_NAME}" ]; then | ||
| 128 | ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \ | ||
| 129 | $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} | ||
| 130 | fi | ||
| 131 | if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then | ||
| 132 | ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \ | ||
| 133 | $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} | ||
| 134 | fi | ||
| 135 | fi | ||
| 136 | fi | ||
| 137 | done | ||
| 138 | done | ||
| 139 | } | ||
diff --git a/meta/classes-recipe/kernel-fit-extra-artifacts.bbclass b/meta/classes-recipe/kernel-fit-extra-artifacts.bbclass deleted file mode 100644 index 385fe9895a..0000000000 --- a/meta/classes-recipe/kernel-fit-extra-artifacts.bbclass +++ /dev/null | |||
| @@ -1,19 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Generate and deploy additional artifacts required for FIT image creation. | ||
| 8 | # To use this class, add it to the KERNEL_CLASSES variable. | ||
| 9 | |||
| 10 | inherit kernel-uboot | ||
| 11 | |||
| 12 | kernel_do_deploy:append() { | ||
| 13 | # Provide the kernel artifacts to post processing recipes e.g. for creating a FIT image | ||
| 14 | uboot_prep_kimage "$deployDir" | ||
| 15 | # For x86 a setup.bin needs to be include"d in a fitImage as well | ||
| 16 | if [ -e ${KERNEL_OUTPUT_DIR}/setup.bin ]; then | ||
| 17 | install -D "${B}/${KERNEL_OUTPUT_DIR}/setup.bin" "$deployDir/" | ||
| 18 | fi | ||
| 19 | } | ||
diff --git a/meta/classes-recipe/kernel-fit-image.bbclass b/meta/classes-recipe/kernel-fit-image.bbclass deleted file mode 100644 index fd0d21ceee..0000000000 --- a/meta/classes-recipe/kernel-fit-image.bbclass +++ /dev/null | |||
| @@ -1,193 +0,0 @@ | |||
| 1 | |||
| 2 | inherit kernel-arch kernel-artifact-names uboot-config deploy | ||
| 3 | require conf/image-fitimage.conf | ||
| 4 | |||
| 5 | S = "${UNPACKDIR}" | ||
| 6 | |||
| 7 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 8 | |||
| 9 | # This bbclass requires KERNEL_CLASSES += "kernel-fit-extra-artifacts" | ||
| 10 | EXCLUDE_FROM_WORLD = "1" | ||
| 11 | |||
| 12 | DEPENDS += "\ | ||
| 13 | u-boot-tools-native dtc-native \ | ||
| 14 | ${@'kernel-signing-keys-native' if d.getVar('FIT_GENERATE_KEYS') == '1' else ''} \ | ||
| 15 | " | ||
| 16 | |||
| 17 | python () { | ||
| 18 | image = d.getVar('INITRAMFS_IMAGE') | ||
| 19 | if image and d.getVar('INITRAMFS_IMAGE_BUNDLE') != '1': | ||
| 20 | if d.getVar('INITRAMFS_MULTICONFIG'): | ||
| 21 | mc = d.getVar('BB_CURRENT_MC') | ||
| 22 | d.appendVarFlag('do_compile', 'mcdepends', ' mc:' + mc + ':${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete') | ||
| 23 | else: | ||
| 24 | d.appendVarFlag('do_compile', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete') | ||
| 25 | |||
| 26 | # Check if there are any dtb providers | ||
| 27 | providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb") | ||
| 28 | if providerdtb: | ||
| 29 | d.appendVarFlag('do_compile', 'depends', ' virtual/dtb:do_populate_sysroot') | ||
| 30 | d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree") | ||
| 31 | } | ||
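The initramfs handling above is driven by the usual image-recipe variables, e.g. (image name illustrative):

    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "0"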
| 32 | |||
| 33 | do_configure[noexec] = "1" | ||
| 34 | |||
| 35 | UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel" | ||
| 36 | KERNEL_IMAGEDEST ?= "/boot" | ||
| 37 | |||
| 38 | FIT_KERNEL_SIGN_ENABLE ?= "${UBOOT_SIGN_ENABLE}" | ||
| 39 | FIT_KERNEL_SIGN_KEYNAME ?= "${UBOOT_SIGN_KEYNAME}" | ||
| 40 | FIT_KERNEL_SIGN_KEYDIR ?= "${UBOOT_SIGN_KEYDIR}" | ||
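Signing follows the U-Boot signing settings by default. A local.conf sketch enabling signing with generated development keys (key directory and name illustrative):

    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "${TOPDIR}/fit-keys"
    UBOOT_SIGN_KEYNAME = "dev"
    FIT_GENERATE_KEYS = "1"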
| 41 | |||
| 42 | python do_compile() { | ||
| 43 | import shutil | ||
| 44 | import oe.fitimage | ||
| 45 | |||
| 46 | itsfile = "fit-image.its" | ||
| 47 | fitname = "fitImage" | ||
| 48 | kernel_deploydir = d.getVar('DEPLOY_DIR_IMAGE') | ||
| 49 | kernel_deploysubdir = d.getVar('KERNEL_DEPLOYSUBDIR') | ||
| 50 | if kernel_deploysubdir: | ||
| 51 | kernel_deploydir = os.path.join(kernel_deploydir, kernel_deploysubdir) | ||
| 52 | |||
| 53 | # Collect all the its nodes before the its file is generated and mkimage gets executed | ||
| 54 | root_node = oe.fitimage.ItsNodeRootKernel( | ||
| 55 | d.getVar("FIT_DESC"), d.getVar("FIT_ADDRESS_CELLS"), | ||
| 56 | d.getVar('HOST_PREFIX'), d.getVar('UBOOT_ARCH'), d.getVar("FIT_CONF_PREFIX"), | ||
| 57 | oe.types.boolean(d.getVar('FIT_KERNEL_SIGN_ENABLE')), d.getVar("FIT_KERNEL_SIGN_KEYDIR"), | ||
| 58 | d.getVar("UBOOT_MKIMAGE"), d.getVar("UBOOT_MKIMAGE_DTCOPTS"), | ||
| 59 | d.getVar("UBOOT_MKIMAGE_SIGN"), d.getVar("UBOOT_MKIMAGE_SIGN_ARGS"), | ||
| 60 | d.getVar('FIT_HASH_ALG'), d.getVar('FIT_SIGN_ALG'), d.getVar('FIT_PAD_ALG'), | ||
| 61 | d.getVar('FIT_KERNEL_SIGN_KEYNAME'), | ||
| 62 | oe.types.boolean(d.getVar('FIT_SIGN_INDIVIDUAL')), d.getVar('UBOOT_SIGN_IMG_KEYNAME') | ||
| 63 | ) | ||
| 64 | |||
| 65 | # Prepare a kernel image section. | ||
| 66 | shutil.copyfile(os.path.join(kernel_deploydir, "linux.bin"), "linux.bin") | ||
| 67 | with open(os.path.join(kernel_deploydir, "linux_comp")) as linux_comp_f: | ||
| 68 | linux_comp = linux_comp_f.read() | ||
| 69 | root_node.fitimage_emit_section_kernel("kernel-1", "linux.bin", linux_comp, | ||
| 70 | d.getVar('UBOOT_LOADADDRESS'), d.getVar('UBOOT_ENTRYPOINT'), | ||
| 71 | d.getVar('UBOOT_MKIMAGE_KERNEL_TYPE'), d.getVar("UBOOT_ENTRYSYMBOL")) | ||
| 72 | |||
| 73 | # Prepare a DTB image section | ||
| 74 | kernel_devicetree = d.getVar('KERNEL_DEVICETREE') | ||
| 75 | external_kernel_devicetree = d.getVar("EXTERNAL_KERNEL_DEVICETREE") | ||
| 76 | if kernel_devicetree: | ||
| 77 | for dtb in kernel_devicetree.split(): | ||
| 78 | # In deploy_dir the DTBs are placed without sub-directories, even with KERNEL_DTBVENDORED = "1" | ||
| 79 | dtb_name = os.path.basename(dtb) | ||
| 80 | |||
| 81 | # Skip DTB if it's also provided in EXTERNAL_KERNEL_DEVICETREE directory | ||
| 82 | if external_kernel_devicetree: | ||
| 83 | ext_dtb_path = os.path.join(external_kernel_devicetree, dtb_name) | ||
| 84 | if os.path.exists(ext_dtb_path) and os.path.getsize(ext_dtb_path) > 0: | ||
| 85 | continue | ||
| 86 | |||
| 87 | # Copy the dtb or dtbo file into the FIT image assembly directory | ||
| 88 | shutil.copyfile(os.path.join(kernel_deploydir, dtb_name), dtb_name) | ||
| 89 | root_node.fitimage_emit_section_dtb(dtb_name, dtb_name, | ||
| 90 | d.getVar("UBOOT_DTB_LOADADDRESS"), d.getVar("UBOOT_DTBO_LOADADDRESS")) | ||
| 91 | |||
| 92 | if external_kernel_devicetree: | ||
| 93 | # iterate over all .dtb and .dtbo files in the external kernel devicetree directory | ||
| 94 | # and copy them to the FIT image assembly directory | ||
| 95 | for dtb_name in sorted(os.listdir(external_kernel_devicetree)): | ||
| 96 | if dtb_name.endswith('.dtb') or dtb_name.endswith('.dtbo'): | ||
| 97 | dtb_path = os.path.join(external_kernel_devicetree, dtb_name) | ||
| 98 | |||
| 99 | # For symlinks, add a configuration node that refers to the DTB image node to which the symlink points | ||
| 100 | symlink_target = oe.fitimage.symlink_points_below(dtb_name, external_kernel_devicetree) | ||
| 101 | if symlink_target: | ||
| 102 | root_node.fitimage_emit_section_dtb_alias(dtb_name, symlink_target, True) | ||
| 103 | # For real DTB files add an image node and a configuration node | ||
| 104 | else: | ||
| 105 | shutil.copyfile(dtb_path, dtb_name) | ||
| 106 | root_node.fitimage_emit_section_dtb(dtb_name, dtb_name, | ||
| 107 | d.getVar("UBOOT_DTB_LOADADDRESS"), d.getVar("UBOOT_DTBO_LOADADDRESS"), True) | ||
| 108 | |||
| 109 | # Prepare a u-boot script section | ||
| 110 | fit_uboot_env = d.getVar("FIT_UBOOT_ENV") | ||
| 111 | if fit_uboot_env: | ||
| 112 | root_node.fitimage_emit_section_boot_script("bootscr-"+fit_uboot_env , fit_uboot_env) | ||
| 113 | |||
| 114 | # Prepare a setup section (For x86) | ||
| 115 | setup_bin_path = os.path.join(kernel_deploydir, "setup.bin") | ||
| 116 | if os.path.exists(setup_bin_path): | ||
| 117 | shutil.copyfile(setup_bin_path, "setup.bin") | ||
| 118 | root_node.fitimage_emit_section_setup("setup-1", "setup.bin") | ||
| 119 | |||
| 120 | # Prepare a ramdisk section. | ||
| 121 | initramfs_image = d.getVar('INITRAMFS_IMAGE') | ||
| 122 | if initramfs_image and d.getVar("INITRAMFS_IMAGE_BUNDLE") != '1': | ||
| 123 | # Find and use the first initramfs image archive type we find | ||
| 124 | found = False | ||
| 125 | for img in d.getVar("FIT_SUPPORTED_INITRAMFS_FSTYPES").split(): | ||
| 126 | initramfs_path = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), "%s.%s" % (d.getVar('INITRAMFS_IMAGE_NAME'), img)) | ||
| 127 | if os.path.exists(initramfs_path): | ||
| 128 | bb.note("Found initramfs image: " + initramfs_path) | ||
| 129 | found = True | ||
| 130 | root_node.fitimage_emit_section_ramdisk("ramdisk-1", initramfs_path, | ||
| 131 | initramfs_image, | ||
| 132 | d.getVar("UBOOT_RD_LOADADDRESS"), | ||
| 133 | d.getVar("UBOOT_RD_ENTRYPOINT")) | ||
| 134 | break | ||
| 135 | else: | ||
| 136 | bb.note("Did not find initramfs image: " + initramfs_path) | ||
| 137 | |||
| 138 | if not found: | ||
| 139 | bb.fatal("Could not find a valid initramfs type for %s, the supported types are: %s" % (d.getVar('INITRAMFS_IMAGE_NAME'), d.getVar('FIT_SUPPORTED_INITRAMFS_FSTYPES'))) | ||
| 140 | |||
| 141 | # Generate the configuration section | ||
| 142 | root_node.fitimage_emit_section_config(d.getVar("FIT_CONF_DEFAULT_DTB")) | ||
| 143 | |||
| 144 | # Write the its file | ||
| 145 | root_node.write_its_file(itsfile) | ||
| 146 | |||
| 147 | # Assemble the FIT image | ||
| 148 | root_node.run_mkimage_assemble(itsfile, fitname) | ||
| 149 | |||
| 150 | # Sign the FIT image if required | ||
| 151 | root_node.run_mkimage_sign(fitname) | ||
| 152 | } | ||
| 153 | do_compile[depends] += "virtual/kernel:do_deploy" | ||
| 154 | |||
| 155 | do_install() { | ||
| 156 | install -d "${D}/${KERNEL_IMAGEDEST}" | ||
| 157 | install -m 0644 "${B}/fitImage" "${D}/${KERNEL_IMAGEDEST}/fitImage" | ||
| 158 | } | ||
| 159 | |||
| 160 | FILES:${PN} = "${KERNEL_IMAGEDEST}" | ||
| 161 | |||
| 162 | |||
| 163 | do_deploy() { | ||
| 164 | deploy_dir="${DEPLOYDIR}" | ||
| 165 | if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then | ||
| 166 | deploy_dir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}" | ||
| 167 | fi | ||
| 168 | install -d "$deploy_dir" | ||
| 169 | install -m 0644 "${B}/fitImage" "$deploy_dir/fitImage" | ||
| 170 | install -m 0644 "${B}/fit-image.its" "$deploy_dir/fit-image.its" | ||
| 171 | |||
| 172 | if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then | ||
| 173 | ln -snf fit-image.its "$deploy_dir/fitImage-its-${KERNEL_FIT_NAME}.its" | ||
| 174 | if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then | ||
| 175 | ln -snf fit-image.its "$deploy_dir/fitImage-its-${KERNEL_FIT_LINK_NAME}" | ||
| 176 | fi | ||
| 177 | fi | ||
| 178 | |||
| 179 | if [ -n "${INITRAMFS_IMAGE}" ]; then | ||
| 180 | ln -snf fit-image.its "$deploy_dir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its" | ||
| 181 | if [ -n "${KERNEL_FIT_LINK_NAME}" ]; then | ||
| 182 | ln -snf fit-image.its "$deploy_dir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}" | ||
| 183 | fi | ||
| 184 | |||
| 185 | if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then | ||
| 186 | ln -snf fitImage "$deploy_dir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}" | ||
| 187 | if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then | ||
| 188 | ln -snf fitImage "$deploy_dir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}" | ||
| 189 | fi | ||
| 190 | fi | ||
| 191 | fi | ||
| 192 | } | ||
| 193 | addtask deploy after do_compile before do_build | ||
diff --git a/meta/classes-recipe/kernel-grub.bbclass b/meta/classes-recipe/kernel-grub.bbclass deleted file mode 100644 index 2325e635e1..0000000000 --- a/meta/classes-recipe/kernel-grub.bbclass +++ /dev/null | |||
| @@ -1,111 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # When installing an rpm to update the kernel on a deployed target, this class | ||
| 9 | # updates the boot area and the boot menu so the new kernel boots by default, | ||
| 10 | # while still allowing a fall back to the original kernel. | ||
| 11 | # | ||
| 12 | # - In kernel-image's preinstall scriptlet, it backs up the original kernel to | ||
| 13 | # avoid a probable conflict with the new one. | ||
| 14 | # | ||
| 15 | # - In kernel-image's postinstall scriptlet, it modifies grub's config file to | ||
| 16 | # make the new kernel the boot priority. | ||
| 17 | # | ||
| 18 | |||
| 19 | python __anonymous () { | ||
| 20 | import re | ||
| 21 | |||
| 22 | preinst = ''' | ||
| 23 | # Determine which grub config file is present | ||
| 24 | [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list" | ||
| 25 | [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg" | ||
| 26 | if [ -n "$grubcfg" ]; then | ||
| 27 | # Dereference symlink to avoid a conflict with the new kernel name. | ||
| 28 | if grep -q "/KERNEL_IMAGETYPE \+root=" $grubcfg; then | ||
| 29 | if [ -L "$D/boot/KERNEL_IMAGETYPE" ]; then | ||
| 30 | kimage=`realpath $D/boot/KERNEL_IMAGETYPE 2>/dev/null` | ||
| 31 | if [ -f "$D$kimage" ]; then | ||
| 32 | sed -i "s:KERNEL_IMAGETYPE \+root=:${kimage##*/} root=:" $grubcfg | ||
| 33 | fi | ||
| 34 | fi | ||
| 35 | fi | ||
| 36 | |||
| 37 | # Rename old kernel if it conflicts with new kernel name. | ||
| 38 | if grep -q "/KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=" $grubcfg; then | ||
| 39 | if [ -f "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" ]; then | ||
| 40 | timestamp=`date +%s` | ||
| 41 | kimage="$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}-$timestamp-back" | ||
| 42 | sed -i "s:KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg | ||
| 43 | mv "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" "$kimage" | ||
| 44 | fi | ||
| 45 | fi | ||
| 46 | fi | ||
| 47 | ''' | ||
| 48 | |||
| 49 | postinst = ''' | ||
| 50 | get_new_grub_cfg() { | ||
| 51 | grubcfg="$1" | ||
| 52 | old_image="$2" | ||
| 53 | title="Update KERNEL_IMAGETYPE-${KERNEL_VERSION}-${PV}" | ||
| 54 | if [ "${grubcfg##*/}" = "grub.cfg" ]; then | ||
| 55 | rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \ | ||
| 56 | sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"` | ||
| 57 | |||
| 58 | echo "menuentry \"$title\" {" | ||
| 59 | echo " set root=(hd0,1)" | ||
| 60 | echo "$rootfs" | ||
| 61 | echo "}" | ||
| 62 | elif [ "${grubcfg##*/}" = "menu.list" ]; then | ||
| 63 | rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \ | ||
| 64 | sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"` | ||
| 65 | |||
| 66 | echo "default 0" | ||
| 67 | echo "timeout 30" | ||
| 68 | echo "title $title" | ||
| 69 | echo "root (hd0,0)" | ||
| 70 | echo "$rootfs" | ||
| 71 | fi | ||
| 72 | } | ||
| 73 | |||
| 74 | get_old_grub_cfg() { | ||
| 75 | grubcfg="$1" | ||
| 76 | if [ "${grubcfg##*/}" = "grub.cfg" ]; then | ||
| 77 | cat "$grubcfg" | ||
| 78 | elif [ "${grubcfg##*/}" = "menu.list" ]; then | ||
| 79 | sed -e '/^default/d' -e '/^timeout/d' "$grubcfg" | ||
| 80 | fi | ||
| 81 | } | ||
| 82 | |||
| 83 | if [ -f "$D/boot/grub/grub.cfg" ]; then | ||
| 84 | grubcfg="$D/boot/grub/grub.cfg" | ||
| 85 | old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'` | ||
| 86 | elif [ -f "$D/boot/grub/menu.list" ]; then | ||
| 87 | grubcfg="$D/boot/grub/menu.list" | ||
| 88 | old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'` | ||
| 89 | fi | ||
| 90 | |||
| 91 | # Don't update grubcfg on first install, when the old bzImage doesn't exist. | ||
| 92 | if [ -f "$D/boot/${old_image##*/}" ]; then | ||
| 93 | grubcfgtmp="$grubcfg.tmp" | ||
| 94 | get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp | ||
| 95 | get_old_grub_cfg "$grubcfg" >> $grubcfgtmp | ||
| 96 | mv $grubcfgtmp $grubcfg | ||
| 97 | echo "Caution! Update kernel may affect kernel-module!" | ||
| 98 | fi | ||
| 99 | ''' | ||
| 100 | |||
| 101 | imagetypes = d.getVar('KERNEL_IMAGETYPES') | ||
| 102 | imagetypes = re.sub(r'\.gz$', '', imagetypes) | ||
| 103 | |||
| 104 | for type in imagetypes.split(): | ||
| 105 | typelower = type.lower() | ||
| 106 | preinst_append = preinst.replace('KERNEL_IMAGETYPE', type) | ||
| 107 | postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type) | ||
| 108 | d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append) | ||
| 109 | d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend) | ||
| 110 | } | ||
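For a grub2 target, the fragment prepended by get_new_grub_cfg looks roughly like this (device, version and paths illustrative):

    menuentry "Update bzImage-5.15.0-1.0" {
        set root=(hd0,1)
        linux /boot/bzImage-5.15.0 root=/dev/sda2
    }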
| 111 | |||
diff --git a/meta/classes-recipe/kernel-module-split.bbclass b/meta/classes-recipe/kernel-module-split.bbclass deleted file mode 100644 index 75ed696b72..0000000000 --- a/meta/classes-recipe/kernel-module-split.bbclass +++ /dev/null | |||
| @@ -1,245 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | pkg_postinst:modules () { | ||
| 8 | if [ -z "$D" ]; then | ||
| 9 | depmod -a ${KERNEL_VERSION} | ||
| 10 | else | ||
| 11 | # image.bbclass will call depmodwrapper after everything is installed, | ||
| 12 | # no need to do it here as well | ||
| 13 | : | ||
| 14 | fi | ||
| 15 | } | ||
| 16 | |||
| 17 | pkg_postrm:modules () { | ||
| 18 | if [ -z "$D" ]; then | ||
| 19 | depmod -a ${KERNEL_VERSION} | ||
| 20 | else | ||
| 21 | depmodwrapper -a -b $D ${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME} | ||
| 22 | fi | ||
| 23 | } | ||
| 24 | |||
| 25 | autoload_postinst_fragment() { | ||
| 26 | if [ x"$D" = "x" ]; then | ||
| 27 | modprobe %s || true | ||
| 28 | fi | ||
| 29 | } | ||
| 30 | |||
| 31 | PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross" | ||
| 32 | |||
| 33 | modulesloaddir ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '${nonarch_libdir}', '${sysconfdir}', d)}/modules-load.d" | ||
| 34 | modprobedir ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '${nonarch_base_libdir}', '${sysconfdir}', d)}/modprobe.d" | ||
| 35 | |||
| 36 | KERNEL_SPLIT_MODULES ?= "1" | ||
| 37 | PACKAGESPLITFUNCS =+ "split_kernel_module_packages" | ||
| 38 | |||
| 39 | KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules" | ||
| 40 | |||
| 41 | KERNEL_MODULE_PACKAGE_PREFIX ?= "" | ||
| 42 | KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}" | ||
| 43 | KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1" | ||
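The autoload and probe-conf handling below is configured from distro or machine metadata, e.g. (module names illustrative):

    KERNEL_MODULE_AUTOLOAD += "i2c-dev"
    KERNEL_MODULE_PROBECONF += "loop"
    module_conf_loop = "options loop max_loop=64"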
| 44 | |||
| 45 | python split_kernel_module_packages () { | ||
| 46 | import re | ||
| 47 | |||
| 48 | modinfoexp = re.compile("([^=]+)=(.*)") | ||
| 49 | |||
| 50 | def extract_modinfo(file): | ||
| 51 | import tempfile, subprocess | ||
| 52 | tempfile.tempdir = d.getVar("WORKDIR") | ||
| 53 | compressed = re.match( r'.*\.(gz|xz|zst)$', file) | ||
| 54 | tf = tempfile.mkstemp() | ||
| 55 | tmpfile = tf[1] | ||
| 56 | if compressed: | ||
| 57 | tmpkofile = tmpfile + ".ko" | ||
| 58 | if compressed.group(1) == 'gz': | ||
| 59 | cmd = "gunzip -dc %s > %s" % (file, tmpkofile) | ||
| 60 | subprocess.check_call(cmd, shell=True) | ||
| 61 | elif compressed.group(1) == 'xz': | ||
| 62 | cmd = "xz -dc %s > %s" % (file, tmpkofile) | ||
| 63 | subprocess.check_call(cmd, shell=True) | ||
| 64 | elif compressed.group(1) == 'zst': | ||
| 65 | cmd = "zstd -dc %s > %s" % (file, tmpkofile) | ||
| 66 | subprocess.check_call(cmd, shell=True) | ||
| 67 | else: | ||
| 68 | msg = "Cannot decompress '%s'" % file | ||
| 69 | raise msg | ||
| 70 | cmd = "%s -j .modinfo -O binary %s %s" % (d.getVar("OBJCOPY"), tmpkofile, tmpfile) | ||
| 71 | else: | ||
| 72 | cmd = "%s -j .modinfo -O binary %s %s" % (d.getVar("OBJCOPY"), file, tmpfile) | ||
| 73 | subprocess.check_call(cmd, shell=True) | ||
| 74 | # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö') | ||
| 75 | with open(tmpfile, errors='replace') as f: | ||
| 76 | l = f.read().split("\000") | ||
| 77 | os.close(tf[0]) | ||
| 78 | os.unlink(tmpfile) | ||
| 79 | if compressed: | ||
| 80 | os.unlink(tmpkofile) | ||
| 81 | vals = {} | ||
| 82 | for i in l: | ||
| 83 | m = modinfoexp.match(i) | ||
| 84 | if not m: | ||
| 85 | continue | ||
| 86 | vals[m.group(1)] = m.group(2) | ||
| 87 | return vals | ||
| 88 | |||
| 89 | def handle_conf_files(d, basename, pkg): | ||
| 90 | # If autoloading is requested, output ${modulesloaddir}/<name>.conf and append | ||
| 91 | # appropriate modprobe commands to the postinst | ||
| 92 | autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split() | ||
| 93 | autoload = d.getVar('module_autoload_%s' % basename) | ||
| 94 | if autoload and autoload == basename: | ||
| 95 | bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename) | ||
| 96 | if autoload and basename not in autoloadlist: | ||
| 97 | bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename)) | ||
| 98 | |||
| 99 | # The .conf file can either be installed by a recipe or generated from module_autoload_* | ||
| 100 | conf = '%s/%s.conf' % (d.getVar('modulesloaddir'), basename) | ||
| 101 | name = '%s%s' % (d.getVar('PKGD'), conf) | ||
| 102 | # If module name is in KERNEL_MODULE_AUTOLOAD, then generate the .conf file and write to `name`. | ||
| 103 | if basename in autoloadlist: | ||
| 104 | os.makedirs(os.path.dirname(name), exist_ok=True) | ||
| 105 | with open(name, 'w') as f: | ||
| 106 | if autoload: | ||
| 107 | for m in autoload.split(): | ||
| 108 | f.write('%s\n' % m) | ||
| 109 | else: | ||
| 110 | f.write('%s\n' % basename) | ||
| 111 | # If the .conf file exists, then add it to FILES:* and CONFFILES:* and add a postinstall hook. | ||
| 112 | # It doesn't matter if it was generated from module_autoload_* or installed by the recipe. | ||
| 113 | if os.path.exists(name): | ||
| 114 | conf2append = ' %s' % conf | ||
| 115 | d.appendVar('FILES:%s' % pkg, conf2append) | ||
| 116 | d.appendVar('CONFFILES:%s' % pkg, conf2append) | ||
| 117 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 118 | if not postinst: | ||
| 119 | postinst = d.getVar('pkg_postinst:modules') | ||
| 120 | postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename) | ||
| 121 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 122 | |||
| 123 | # Write out any modconf fragment | ||
| 124 | modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split() | ||
| 125 | modconf = d.getVar('module_conf_%s' % basename) | ||
| 126 | |||
| 127 | # The .conf file can either be installed by a recipe or generated from module_conf_* | ||
| 128 | conf = '%s/%s.conf' % (d.getVar('modprobedir'), basename) | ||
| 129 | name = '%s%s' % (d.getVar('PKGD'), conf) | ||
| 130 | # If module name is in KERNEL_MODULE_PROBECONF, then generate the .conf file and write to `name`. | ||
| 131 | if modconf and basename in modconflist: | ||
| 132 | os.makedirs(os.path.dirname(name), exist_ok=True) | ||
| 133 | with open(name, 'w') as f: | ||
| 134 | f.write("%s\n" % modconf) | ||
| 135 | elif modconf: | ||
| 136 | bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename)) | ||
| 137 | # If the .conf file exists, then add it to FILES:* and CONFFILES:*. | ||
| 138 | # It doesn't matter if it was generated from module_conf_* or installed by the recipe. | ||
| 139 | if os.path.exists(name): | ||
| 140 | conf2append = ' %s' % conf | ||
| 141 | d.appendVar('FILES:%s' % pkg, conf2append) | ||
| 142 | d.appendVar('CONFFILES:%s' % pkg, conf2append) | ||
| 143 | |||
| 144 | def generate_conf_files(d, root, file_regex, output_pattern): | ||
| 145 | """ | ||
| 146 | Arguments: | ||
| 147 | root -- the path in which to search. Contains system lib path | ||
| 148 | so needs expansion. | ||
| 149 | file_regex -- regular expression to match searched files. Use | ||
| 150 | parentheses () to mark the part of this expression | ||
| 151 | that should be used to derive the module name (to be | ||
| 152 | substituted where %s is used in other function | ||
| 153 | arguments as noted below) | ||
| 154 | output_pattern -- pattern to use for the package names. Must include %s. | ||
| 155 | """ | ||
| 156 | import re, stat | ||
| 157 | |||
| 158 | dvar = d.getVar('PKGD') | ||
| 159 | root = d.expand(root) | ||
| 160 | |||
| 161 | # if the root directory doesn't exist, it's fatal - exit from the current execution. | ||
| 162 | if not os.path.exists(dvar + root): | ||
| 163 | bb.fatal("kernel module root directory path does not exist") | ||
| 164 | |||
| 165 | # Walk through the kernel module directory. For each entry, check whether it | ||
| 166 | # matches the desired regex pattern and file type. If it does, generate its | ||
| 167 | # conf file based on its package name. | ||
| 168 | for walkroot, dirs, files in os.walk(dvar + root): | ||
| 169 | for file in files: | ||
| 170 | relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1) | ||
| 171 | if not relpath: | ||
| 172 | continue | ||
| 173 | m = re.match(file_regex, os.path.basename(relpath)) | ||
| 174 | if not m: | ||
| 175 | continue | ||
| 176 | file_f = os.path.join(dvar + root, relpath) | ||
| 177 | mode = os.lstat(file_f).st_mode | ||
| 178 | if not stat.S_ISREG(mode): # only regular files are module candidates here | ||
| 179 | continue | ||
| 180 | |||
| 181 | basename = m.group(1) | ||
| 182 | on = legitimize_package_name(basename) | ||
| 183 | pkg = output_pattern % on | ||
| 184 | handle_conf_files(d, basename, pkg) | ||
| 185 | |||
| 186 | |||
| 187 | def frob_metadata(file, pkg, pattern, format, basename): | ||
| 188 | vals = extract_modinfo(file) | ||
| 189 | dvar = d.getVar('PKGD') | ||
| 190 | |||
| 191 | handle_conf_files(d, basename, pkg) | ||
| 192 | |||
| 193 | if "description" in vals: | ||
| 194 | old_desc = d.getVar('DESCRIPTION:' + pkg) or "" | ||
| 195 | d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"]) | ||
| 196 | |||
| 197 | rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "") | ||
| 198 | modinfo_deps = [] | ||
| 199 | if "depends" in vals and vals["depends"] != "": | ||
| 200 | for dep in vals["depends"].split(","): | ||
| 201 | on = legitimize_package_name(dep) | ||
| 202 | dependency_pkg = format % on | ||
| 203 | modinfo_deps.append(dependency_pkg) | ||
| 204 | for dep in modinfo_deps: | ||
| 205 | if not dep in rdepends: | ||
| 206 | rdepends[dep] = [] | ||
| 207 | d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False)) | ||
| 208 | |||
| 209 | # Avoid automatic -dev recommendations for modules ending with -dev. | ||
| 210 | d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1) | ||
| 211 | |||
| 212 | # Provide virtual package without postfix | ||
| 213 | providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL') | ||
| 214 | if providevirt == "1": | ||
| 215 | postfix = format.split('%s')[1] | ||
| 216 | d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, '')) | ||
| 217 | |||
| 218 | kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel" | ||
| 219 | kernel_version = d.getVar("KERNEL_VERSION") | ||
| 220 | |||
| 221 | metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE') | ||
| 222 | splitmods = d.getVar('KERNEL_SPLIT_MODULES') | ||
| 223 | postinst = d.getVar('pkg_postinst:modules') | ||
| 224 | postrm = d.getVar('pkg_postrm:modules') | ||
| 225 | |||
| 226 | module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$' | ||
| 227 | |||
| 228 | module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX') | ||
| 229 | module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX') | ||
| 230 | module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix | ||
| 231 | |||
| 232 | if splitmods != '1': | ||
| 233 | d.appendVar('FILES:' + metapkg, '%s %s %s/modules' % | ||
| 234 | (d.getVar('modulesloaddir'), d.getVar('modprobedir'), d.getVar("nonarch_base_libdir"))) | ||
| 235 | d.appendVar('pkg_postinst:%s' % metapkg, postinst) | ||
| 236 | d.prependVar('pkg_postrm:%s' % metapkg, postrm) | ||
| 237 | generate_conf_files(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern) | ||
| 238 | return | ||
| 239 | |||
| 240 | modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version)) | ||
| 241 | if modules: | ||
| 242 | d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules)) | ||
| 243 | } | ||
| 244 | |||
| 245 | do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}' | ||
diff --git a/meta/classes-recipe/kernel-uboot.bbclass b/meta/classes-recipe/kernel-uboot.bbclass deleted file mode 100644 index 62974baaf0..0000000000 --- a/meta/classes-recipe/kernel-uboot.bbclass +++ /dev/null | |||
| @@ -1,59 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # fitImage kernel compression algorithm | ||
| 8 | FIT_KERNEL_COMP_ALG ?= "gzip" | ||
| 9 | FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz" | ||
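When overriding the compression algorithm, the extension must be kept in step with it, e.g.:

    FIT_KERNEL_COMP_ALG = "lzma"
    FIT_KERNEL_COMP_ALG_EXTENSION = ".lzma"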
| 10 | |||
| 11 | # Kernel image type passed to mkimage (e.g. kernel, kernel_noload, ...) | ||
| 12 | UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel" | ||
| 13 | |||
| 14 | uboot_prep_kimage() { | ||
| 15 | output_dir=$1 | ||
| 16 | # For backward compatibility with kernel-fitimage.bbclass and kernel-uboot.bbclass, | ||
| 17 | # support calling it without a parameter as well | ||
| 18 | if [ -z "$output_dir" ]; then | ||
| 19 | output_dir='.' | ||
| 20 | fi | ||
| 21 | |||
| 22 | linux_bin=$output_dir/linux.bin | ||
| 23 | if [ -e "arch/${ARCH}/boot/compressed/vmlinux" ]; then | ||
| 24 | vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux" | ||
| 25 | linux_suffix="" | ||
| 26 | linux_comp="none" | ||
| 27 | elif [ -e "arch/${ARCH}/boot/vmlinuz.bin" ]; then | ||
| 28 | rm -f "$linux_bin" | ||
| 29 | cp -l "arch/${ARCH}/boot/vmlinuz.bin" "$linux_bin" | ||
| 30 | vmlinux_path="" | ||
| 31 | linux_suffix="" | ||
| 32 | linux_comp="none" | ||
| 33 | else | ||
| 34 | vmlinux_path="vmlinux" | ||
| 35 | # Use vmlinux.initramfs for $linux_bin when INITRAMFS_IMAGE_BUNDLE is set, | ||
| 36 | # as per the implementation in kernel.bbclass. | ||
| 37 | # See do_bundle_initramfs function | ||
| 38 | if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then | ||
| 39 | vmlinux_path="vmlinux.initramfs" | ||
| 40 | fi | ||
| 41 | linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}" | ||
| 42 | linux_comp="${FIT_KERNEL_COMP_ALG}" | ||
| 43 | fi | ||
| 44 | |||
| 45 | [ -n "$vmlinux_path" ] && ${KERNEL_OBJCOPY} -O binary -R .note -R .comment -S "$vmlinux_path" "$linux_bin" | ||
| 46 | |||
| 47 | if [ "$linux_comp" != "none" ] ; then | ||
| 48 | if [ "$linux_comp" = "gzip" ] ; then | ||
| 49 | gzip -9 "$linux_bin" | ||
| 50 | elif [ "$linux_comp" = "lzo" ] ; then | ||
| 51 | lzop -9 "$linux_bin" | ||
| 52 | elif [ "$linux_comp" = "lzma" ] ; then | ||
| 53 | xz --format=lzma -f -6 "$linux_bin" | ||
| 54 | fi | ||
| 55 | mv -f "$linux_bin$linux_suffix" "$linux_bin" | ||
| 56 | fi | ||
| 57 | |||
| 58 | printf "$linux_comp" > "$output_dir/linux_comp" | ||
| 59 | } | ||
diff --git a/meta/classes-recipe/kernel-uimage.bbclass b/meta/classes-recipe/kernel-uimage.bbclass deleted file mode 100644 index e353232a0e..0000000000 --- a/meta/classes-recipe/kernel-uimage.bbclass +++ /dev/null | |||
| @@ -1,42 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit kernel-uboot | ||
| 8 | |||
| 9 | python __anonymous () { | ||
| 10 | if "uImage" in d.getVar('KERNEL_IMAGETYPES'): | ||
| 11 | depends = d.getVar("DEPENDS") | ||
| 12 | depends = "%s u-boot-tools-native" % depends | ||
| 13 | d.setVar("DEPENDS", depends) | ||
| 14 | |||
| 15 | # Override the KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal | ||
| 16 | # to kernel.bbclass. We override the variable here, since we need | ||
| 17 | # to build uImage using the kernel build system if and only if | ||
| 18 | # KEEPUIMAGE == yes. Otherwise, we pack the compressed vmlinux into | ||
| 19 | # the uImage. | ||
| 20 | if d.getVar("KEEPUIMAGE") != 'yes': | ||
| 21 | typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or "" | ||
| 22 | if "uImage" in typeformake.split(): | ||
| 23 | d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux')) | ||
| 24 | |||
| 25 | # Enable building of uImage with mkimage | ||
| 26 | bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d) | ||
| 27 | } | ||
| 28 | |||
| 29 | do_uboot_mkimage[dirs] += "${B}" | ||
| 30 | do_uboot_mkimage() { | ||
| 31 | uboot_prep_kimage | ||
| 32 | linux_comp="$(cat linux_comp)" | ||
| 33 | |||
| 34 | ENTRYPOINT=${UBOOT_ENTRYPOINT} | ||
| 35 | if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then | ||
| 36 | ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \ | ||
| 37 | awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'` | ||
| 38 | fi | ||
| 39 | |||
| 40 | uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "$linux_comp" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage | ||
| 41 | rm -f linux.bin | ||
| 42 | } | ||
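The mkimage call above is driven entirely by U-Boot variables that a machine configuration is expected to provide; a minimal sketch with placeholder addresses (not real board values):

    # machine .conf (illustrative values)
    UBOOT_ARCH = "arm"
    UBOOT_LOADADDRESS = "0x80008000"
    UBOOT_ENTRYPOINT = "0x80008000"
    # alternatively, derive the entry point from a vmlinux symbol:
    UBOOT_ENTRYSYMBOL = "stext"

When UBOOT_ENTRYSYMBOL is set, the nm/awk pipeline above resolves that symbol's address at build time instead of using the fixed UBOOT_ENTRYPOINT.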
diff --git a/meta/classes-recipe/kernel-yocto.bbclass b/meta/classes-recipe/kernel-yocto.bbclass deleted file mode 100644 index e53bf15194..0000000000 --- a/meta/classes-recipe/kernel-yocto.bbclass +++ /dev/null | |||
| @@ -1,778 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # remove tasks that modify the source tree in case externalsrc is inherited | ||
| 8 | SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch" | ||
| 9 | PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe" | ||
| 10 | PATCH_GIT_USER_NAME ?= "OpenEmbedded" | ||
| 11 | |||
| 12 | # The distro or local.conf should set this, but if nobody cares... | ||
| 13 | LINUX_KERNEL_TYPE ??= "standard" | ||
| 14 | |||
| 15 | # KMETA ?= "" | ||
| 16 | KBRANCH ?= "master" | ||
| 17 | KMACHINE ?= "${MACHINE}" | ||
| 18 | SRCREV_FORMAT ?= "meta_machine" | ||
| 19 | |||
| 20 | # LEVELS: | ||
| 21 | # 0: no reporting | ||
| 22 | # 1: report options that are specified, but not in the final config | ||
| 23 | # 2: report options that are not hardware related, but set by a BSP | ||
| 24 | KCONF_AUDIT_LEVEL ?= "1" | ||
| 25 | KCONF_BSP_AUDIT_LEVEL ?= "0" | ||
| 26 | KMETA_AUDIT ?= "yes" | ||
| 27 | KMETA_AUDIT_WERROR ?= "" | ||
| 28 | KMETA_CONFIG_FEATURES ?= "" | ||
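These defaults can be raised per build; for example, a local.conf that reports both missing options and non-hardware BSP options, and promotes audit warnings to errors:

    KCONF_AUDIT_LEVEL = "2"
    KCONF_BSP_AUDIT_LEVEL = "2"
    KMETA_AUDIT_WERROR = "1"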
| 29 | |||
| 30 | # returns local (absolute) path names for all valid patches in the | ||
| 31 | # src_uri | ||
| 32 | def find_patches(d,subdir): | ||
| 33 | patches = src_patches(d) | ||
| 34 | patch_list=[] | ||
| 35 | for p in patches: | ||
| 36 | _, _, local, _, _, parm = bb.fetch.decodeurl(p) | ||
| 37 | # if patchdir has been passed, we won't be able to apply it so skip | ||
| 38 | # the patch for now, and special processing happens later | ||
| 39 | patchdir = '' | ||
| 40 | if "patchdir" in parm: | ||
| 41 | patchdir = parm["patchdir"] | ||
| 42 | if subdir: | ||
| 43 | if subdir == patchdir: | ||
| 44 | patch_list.append(local) | ||
| 45 | else: | ||
| 46 | # skip the patch if a patchdir was supplied, it won't be handled | ||
| 47 | # properly | ||
| 48 | if not patchdir: | ||
| 49 | patch_list.append(local) | ||
| 50 | |||
| 51 | return patch_list | ||
| 52 | |||
| 53 | # returns all the elements from the SRC_URI that are .scc/.cfg files or defconfigs | ||
| 54 | def find_sccs(d): | ||
| 55 | sources=src_patches(d, True) | ||
| 56 | sources_list=[] | ||
| 57 | for s in sources: | ||
| 58 | base, ext = os.path.splitext(os.path.basename(s)) | ||
| 59 | if ext and ext in [".scc", ".cfg"]: | ||
| 60 | sources_list.append(s) | ||
| 61 | elif base and 'defconfig' in base: | ||
| 62 | sources_list.append(s) | ||
| 63 | |||
| 64 | return sources_list | ||
| 65 | |||
| 66 | # check the SRC_URI for "kmeta"-typed git repositories and directories. Return | ||
| 67 | # the name of the repository or directory as it will be found in UNPACKDIR | ||
| 68 | def find_kernel_feature_dirs(d): | ||
| 69 | feature_dirs=[] | ||
| 70 | fetch = bb.fetch2.Fetch([], d) | ||
| 71 | for url in fetch.urls: | ||
| 72 | urldata = fetch.ud[url] | ||
| 73 | parm = urldata.parm | ||
| 74 | type="" | ||
| 75 | destdir = "" | ||
| 76 | if "type" in parm: | ||
| 77 | type = parm["type"] | ||
| 78 | if "destsuffix" in parm: | ||
| 79 | destdir = parm["destsuffix"] | ||
| 80 | elif urldata.type == "file": | ||
| 81 | destdir = urldata.basepath | ||
| 82 | if type == "kmeta" and destdir: | ||
| 83 | feature_dirs.append(destdir) | ||
| 84 | |||
| 85 | return feature_dirs | ||
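This is the hook that linux-yocto style recipes use to ship kernel metadata; a typical SRC_URI entry (branch and name values are illustrative) looks like:

    SRC_URI += "git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-6.6;destsuffix=${KMETA}"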
| 86 | |||
| 87 | # find the master/machine source branch. In the same way that the fetcher processes | ||
| 88 | # git repositories in the SRC_URI, we take the first repo found, first branch. | ||
| 89 | def get_machine_branch(d, default): | ||
| 90 | fetch = bb.fetch2.Fetch([], d) | ||
| 91 | for url in fetch.urls: | ||
| 92 | urldata = fetch.ud[url] | ||
| 93 | parm = urldata.parm | ||
| 94 | if "branch" in parm: | ||
| 95 | branches = urldata.parm.get("branch").split(',') | ||
| 96 | btype = urldata.parm.get("type") | ||
| 97 | if btype != "kmeta": | ||
| 98 | return branches[0] | ||
| 99 | |||
| 100 | return default | ||
| 101 | |||
| 102 | # returns a list of all directories that are on FILESEXTRAPATHS (and | ||
| 103 | # hence available to the build) that contain .scc or .cfg files | ||
| 104 | def get_dirs_with_fragments(d): | ||
| 105 | extrapaths = [] | ||
| 106 | extrafiles = [] | ||
| 107 | extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "") | ||
| 108 | # Remove default flag which was used for checking | ||
| 109 | extrapathsvalue = extrapathsvalue.replace("__default:", "") | ||
| 110 | extrapaths = extrapathsvalue.split(":") | ||
| 111 | for path in extrapaths: | ||
| 112 | if path + ":True" not in extrafiles: | ||
| 113 | extrafiles.append(path + ":" + str(os.path.exists(path))) | ||
| 114 | |||
| 115 | return " ".join(extrafiles) | ||
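Fragments usually reach FILESEXTRAPATHS through the standard bbappend idiom, so files shipped next to a bbappend participate in this checksum; a sketch (fragment name hypothetical):

    # linux-yocto_%.bbappend
    FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
    SRC_URI += "file://enable-feature.cfg"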
| 116 | |||
| 117 | do_kernel_metadata() { | ||
| 118 | set +e | ||
| 119 | |||
| 120 | if [ -n "$1" ]; then | ||
| 121 | mode="$1" | ||
| 122 | else | ||
| 123 | mode="patch" | ||
| 124 | fi | ||
| 125 | |||
| 126 | cd ${S} | ||
| 127 | export KMETA=${KMETA} | ||
| 128 | |||
| 129 | bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0" | ||
| 130 | |||
| 131 | # if kernel tools are available in-tree, they are preferred | ||
| 132 | # and are placed on the path before any external tools, unless | ||
| 133 | # the external tools flag is set, in which case we do nothing. | ||
| 134 | if [ -f "${S}/scripts/util/configme" ]; then | ||
| 135 | if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then | ||
| 136 | PATH=${S}/scripts/util:${PATH} | ||
| 137 | fi | ||
| 138 | fi | ||
| 139 | |||
| 140 | # In a similar manner to the kernel itself: | ||
| 141 | # | ||
| 142 | # defconfig: $(obj)/conf | ||
| 143 | # ifeq ($(KBUILD_DEFCONFIG),) | ||
| 144 | # $< --defconfig $(Kconfig) | ||
| 145 | # else | ||
| 146 | # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'" | ||
| 147 | # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig) | ||
| 148 | # endif | ||
| 149 | # | ||
| 150 | # If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it | ||
| 151 | # from the source tree into a common location, with the normalized "defconfig" name, | ||
| 152 | # where the rest of the process will include and incorporate it into the build. | ||
| 153 | # | ||
| 154 | if [ -n "${KBUILD_DEFCONFIG}" ]; then | ||
| 155 | if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then | ||
| 156 | if [ -f "${UNPACKDIR}/defconfig" ]; then | ||
| 157 | # If the two defconfigs are different, warn that we overwrote the | ||
| 158 | # one already placed in UNPACKDIR | ||
| 159 | cmp "${UNPACKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" | ||
| 160 | if [ $? -ne 0 ]; then | ||
| 161 | bbdebug 1 "detected SRC_URI or patched defconfig in UNPACKDIR. ${KBUILD_DEFCONFIG} copied over it" | ||
| 162 | fi | ||
| 163 | fi | ||
| 164 | cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${UNPACKDIR}/defconfig | ||
| 165 | in_tree_defconfig="${UNPACKDIR}/defconfig" | ||
| 166 | else | ||
| 167 | bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)" | ||
| 168 | fi | ||
| 169 | fi | ||
| 170 | |||
| 171 | if [ "$mode" = "patch" ]; then | ||
| 172 | # Was anyone trying to patch the kernel metadata? We need to do | ||
| 173 | # this here, since the scc commands migrate the .cfg fragments to the | ||
| 174 | # kernel source tree, where they'll be used later. | ||
| 175 | check_git_config | ||
| 176 | patches="${@" ".join(find_patches(d,'kernel-meta'))}" | ||
| 177 | if [ -n "$patches" ]; then | ||
| 178 | ( | ||
| 179 | cd ${UNPACKDIR}/kernel-meta | ||
| 180 | |||
| 181 | # take the SRC_URI patches, and create a series file | ||
| 182 | # this is required to support better processing | ||
| 183 | # of issues with the patches | ||
| 184 | rm -f series | ||
| 185 | for p in $patches; do | ||
| 186 | cp $p . | ||
| 187 | echo "$(basename $p)" >> series | ||
| 188 | done | ||
| 189 | |||
| 190 | # process the series with kgit-s2q, which is what is | ||
| 191 | # handling the rest of the kernel. This allows us | ||
| 192 | # more flexibility for handling failures or advanced | ||
| 193 | # merging functionality | ||
| 194 | message=$(kgit-s2q --gen -v --patches ${UNPACKDIR}/kernel-meta 2>&1) | ||
| 195 | if [ $? -ne 0 ]; then | ||
| 196 | # setup to try the patch again | ||
| 197 | kgit-s2q --prev | ||
| 198 | bberror "Problem applying patches to: ${UNPACKDIR}/kernel-meta" | ||
| 199 | bbfatal_log "\n($message)" | ||
| 200 | fi | ||
| 201 | ) | ||
| 202 | fi | ||
| 203 | fi | ||
| 204 | |||
| 205 | sccs_from_src_uri="${@" ".join(find_sccs(d))}" | ||
| 206 | patches="${@" ".join(find_patches(d,''))}" | ||
| 207 | feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}" | ||
| 208 | |||
| 209 | # a quick check to make sure we don't have duplicate defconfigs. If | ||
| 210 | # there's a defconfig in the SRC_URI, did we also have one from the | ||
| 211 | # KBUILD_DEFCONFIG processing above? | ||
| 212 | src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ') | ||
| 213 | # drop any defconfigs from the src_uri variable; we captured it just above here if it existed | ||
| 214 | sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ') | ||
| 215 | |||
| 216 | if [ -n "$in_tree_defconfig" ]; then | ||
| 217 | sccs_defconfig=$in_tree_defconfig | ||
| 218 | if [ -n "$src_uri_defconfig" ]; then | ||
| 219 | bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI entry $src_uri_defconfig" | ||
| 220 | fi | ||
| 221 | else | ||
| 222 | # if we didn't have an in-tree one, make our defconfig the one | ||
| 223 | # from the src_uri. Note: there may not have been one from the | ||
| 224 | # src_uri, so this can be an empty variable. | ||
| 225 | sccs_defconfig=$src_uri_defconfig | ||
| 226 | fi | ||
| 227 | sccs="$sccs_from_src_uri" | ||
| 228 | |||
| 229 | # check for feature directories/repos/branches that were part of the | ||
| 230 | # SRC_URI. If they were supplied, we convert them into include directives | ||
| 231 | # for the update part of the process | ||
| 232 | for f in ${feat_dirs}; do | ||
| 233 | if [ -d "${UNPACKDIR}/$f/kernel-meta" ]; then | ||
| 234 | includes="$includes -I${UNPACKDIR}/$f/kernel-meta" | ||
| 235 | elif [ -d "${UNPACKDIR}/$f" ]; then | ||
| 236 | includes="$includes -I${UNPACKDIR}/$f" | ||
| 237 | fi | ||
| 238 | done | ||
| 239 | for s in ${sccs} ${patches}; do | ||
| 240 | sdir=$(dirname $s) | ||
| 241 | includes="$includes -I${sdir}" | ||
| 242 | # if a SRC_URI passed patch or .scc has a subdir of "kernel-meta", | ||
| 243 | # then we add it to the search path | ||
| 244 | if [ -d "${sdir}/kernel-meta" ]; then | ||
| 245 | includes="$includes -I${sdir}/kernel-meta" | ||
| 246 | fi | ||
| 247 | done | ||
| 248 | |||
| 249 | # allow in-tree config fragments to be used in KERNEL_FEATURES | ||
| 250 | includes="$includes -I${S}/arch/${ARCH}/configs -I${S}/kernel/configs" | ||
| 251 | |||
| 252 | # expand kernel features into their full path equivalents | ||
| 253 | bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE}) | ||
| 254 | if [ -z "$bsp_definition" ]; then | ||
| 255 | if [ -z "$sccs_defconfig" ]; then | ||
| 256 | bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided" | ||
| 257 | fi | ||
| 258 | else | ||
| 259 | # if the bsp definition has "define KMETA_EXTERNAL_BSP t", | ||
| 260 | # then we need to set a flag that will instruct the next | ||
| 261 | # steps to use the BSP as both configuration and patches. | ||
| 262 | grep -q KMETA_EXTERNAL_BSP $bsp_definition | ||
| 263 | if [ $? -eq 0 ]; then | ||
| 264 | KMETA_EXTERNAL_BSPS="t" | ||
| 265 | fi | ||
| 266 | fi | ||
| 267 | meta_dir=$(kgit --meta) | ||
| 268 | |||
| 269 | KERNEL_FEATURES_FINAL="" | ||
| 270 | if [ -n "${KERNEL_FEATURES}" ]; then | ||
| 271 | for feature in ${KERNEL_FEATURES}; do | ||
| 272 | feature_as_specified="$feature" | ||
| 273 | feature="$(echo $feature_as_specified | cut -d: -f1)" | ||
| 274 | feature_specifier="$(echo $feature_as_specified | cut -d: -f2)" | ||
| 275 | feature_found=f | ||
| 276 | for d in $includes; do | ||
| 277 | path_to_check=$(echo $d | sed 's/^-I//') | ||
| 278 | if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then | ||
| 279 | feature_found=t | ||
| 280 | fi | ||
| 281 | done | ||
| 282 | if [ "$feature_found" = "f" ]; then | ||
| 283 | if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then | ||
| 284 | bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set" | ||
| 285 | bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue" | ||
| 286 | else | ||
| 287 | bberror "Feature '$feature' not found, this will cause configuration failures." | ||
| 288 | bberror "Check the SRC_URI for meta-data repositories or directories that may be missing" | ||
| 289 | bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue" | ||
| 290 | fi | ||
| 291 | else | ||
| 292 | KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature_as_specified" | ||
| 293 | fi | ||
| 294 | done | ||
| 295 | fi | ||
| 296 | |||
| 297 | if [ "$mode" = "config" ]; then | ||
| 298 | # run1: pull all the configuration fragments, no matter where they come from | ||
| 299 | elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`" | ||
| 300 | if [ -n "${elements}" ]; then | ||
| 301 | echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition | ||
| 302 | echo "${KMETA_CONFIG_FEATURES}" | grep -q "prefer-modules" | ||
| 303 | if [ $? -eq 0 ]; then | ||
| 304 | scc_defines="-DMODULE_OR_Y=m" | ||
| 305 | fi | ||
| 306 | scc --force $scc_defines -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL | ||
| 307 | if [ $? -ne 0 ]; then | ||
| 308 | bbfatal_log "Could not generate configuration queue for ${KMACHINE}." | ||
| 309 | fi | ||
| 310 | fi | ||
| 311 | fi | ||
| 312 | |||
| 313 | # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from | ||
| 314 | # the bsp definition, then we inject the bsp_definition into the | ||
| 315 | # patch phase below. We'll piggyback on the sccs variable. | ||
| 316 | if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then | ||
| 317 | sccs="${bsp_definition} ${sccs}" | ||
| 318 | fi | ||
| 319 | |||
| 320 | if [ "$mode" = "patch" ]; then | ||
| 321 | # run2: only generate patches for elements that have been passed on the SRC_URI | ||
| 322 | elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`" | ||
| 323 | if [ -n "${elements}" ]; then | ||
| 324 | scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL | ||
| 325 | if [ $? -ne 0 ]; then | ||
| 326 | bbfatal_log "Could not generate configuration queue for ${KMACHINE}." | ||
| 327 | fi | ||
| 328 | fi | ||
| 329 | fi | ||
| 330 | |||
| 331 | if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then | ||
| 332 | bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):" | ||
| 333 | bbnote "======================================================================" | ||
| 334 | if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then | ||
| 335 | bbnote "Non kernel-cache (external) bsp" | ||
| 336 | fi | ||
| 337 | bbnote "BSP entry point / definition: $bsp_definition" | ||
| 338 | if [ -n "$in_tree_defconfig" ]; then | ||
| 339 | bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}" | ||
| 340 | fi | ||
| 341 | bbnote "Fragments from SRC_URI: $sccs_from_src_uri" | ||
| 342 | bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL" | ||
| 343 | bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL" | ||
| 344 | fi | ||
| 345 | |||
| 346 | set -e | ||
| 347 | } | ||
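KERNEL_FEATURES entries are resolved against the include paths assembled above; typical appends (the feature paths are the conventional yocto-kernel-cache ones) look like:

    KERNEL_FEATURES:append = " features/netfilter/netfilter.scc"
    KERNEL_FEATURES:append:qemuall = " cfg/virtio.scc"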
| 348 | |||
| 349 | do_patch() { | ||
| 350 | set +e | ||
| 351 | cd ${S} | ||
| 352 | |||
| 353 | check_git_config | ||
| 354 | if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then | ||
| 355 | reproducible_git_committer_author | ||
| 356 | fi | ||
| 357 | meta_dir=$(kgit --meta) | ||
| 358 | (cd ${meta_dir}; ln -sf patch.queue series) | ||
| 359 | if [ -f "${meta_dir}/series" ]; then | ||
| 360 | kgit_extra_args="" | ||
| 361 | if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then | ||
| 362 | kgit_extra_args="--commit-sha author" | ||
| 363 | fi | ||
| 364 | kgit-s2q --gen -v $kgit_extra_args --patches .kernel-meta/ | ||
| 365 | if [ $? -ne 0 ]; then | ||
| 366 | bberror "Could not apply patches for ${KMACHINE}." | ||
| 367 | bbfatal_log "Patch failures can be resolved in the linux source directory ${S})" | ||
| 368 | fi | ||
| 369 | fi | ||
| 370 | |||
| 371 | if [ -f "${meta_dir}/merge.queue" ]; then | ||
| 372 | # we need to merge all these branches | ||
| 373 | for b in $(cat ${meta_dir}/merge.queue); do | ||
| 374 | git show-ref --verify --quiet refs/heads/${b} | ||
| 375 | if [ $? -eq 0 ]; then | ||
| 376 | bbnote "Merging branch ${b}" | ||
| 377 | git merge -q --no-ff -m "Merge branch ${b}" ${b} | ||
| 378 | else | ||
| 379 | bbfatal "branch ${b} does not exist, cannot merge" | ||
| 380 | fi | ||
| 381 | done | ||
| 382 | fi | ||
| 383 | |||
| 384 | set -e | ||
| 385 | } | ||
| 386 | |||
| 387 | do_kernel_checkout() { | ||
| 388 | set +e | ||
| 389 | |||
| 390 | source_dir=`echo ${S} | sed 's%/$%%'` | ||
| 391 | source_unpackdir="${UNPACKDIR}/${BB_GIT_DEFAULT_DESTSUFFIX}" | ||
| 392 | if [ -d "${source_unpackdir}" ]; then | ||
| 393 | # case: git repository | ||
| 394 | # if S is UNPACKDIR/BB_GIT_DEFAULT_DESTSUFFIX, then we shouldn't be moving or deleting the tree. | ||
| 395 | if [ "${source_dir}" != "${source_unpackdir}" ]; then | ||
| 396 | if [ -d "${source_unpackdir}/.git" ]; then | ||
| 397 | # regular git repository with .git | ||
| 398 | rm -rf ${S} | ||
| 399 | mv ${source_unpackdir} ${S} | ||
| 400 | else | ||
| 401 | # create source for bare cloned git repository | ||
| 402 | git clone ${source_unpackdir} ${S} | ||
| 403 | rm -rf ${source_unpackdir} | ||
| 404 | fi | ||
| 405 | fi | ||
| 406 | cd ${S} | ||
| 407 | |||
| 408 | # convert any remote branches to local tracking ones | ||
| 409 | for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do | ||
| 410 | b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`; | ||
| 411 | git show-ref --quiet --verify -- "refs/heads/$b" | ||
| 412 | if [ $? -ne 0 ]; then | ||
| 413 | git branch $b $i > /dev/null | ||
| 414 | fi | ||
| 415 | done | ||
| 416 | |||
| 417 | # Create a working tree copy of the kernel by checking out a branch | ||
| 418 | machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" | ||
| 419 | |||
| 420 | # checkout and clobber any unimportant files | ||
| 421 | git checkout -f ${machine_branch} | ||
| 422 | else | ||
| 423 | # case: we have no git repository at all. | ||
| 424 | # To support low bandwidth options for building the kernel, we'll just | ||
| 425 | # convert the tree to a git repo and let the rest of the process work unchanged | ||
| 426 | |||
| 427 | # if ${S} hasn't been set to the proper subdirectory, a default of "linux" is | ||
| 428 | # used, but we can't initialize that empty directory. So check it and throw a | ||
| 429 | # clear error | ||
| 430 | |||
| 431 | cd ${S} | ||
| 432 | if [ ! -f "Makefile" ]; then | ||
| 433 | bberror "S is not set to the linux source directory. Check " | ||
| 434 | bbfatal "the recipe and set S to the proper extracted subdirectory" | ||
| 435 | fi | ||
| 436 | rm -f .gitignore | ||
| 437 | git init | ||
| 438 | check_git_config | ||
| 439 | if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then | ||
| 440 | reproducible_git_committer_author | ||
| 441 | fi | ||
| 442 | git add . | ||
| 443 | git commit -q -n -m "baseline commit: creating repo for ${PN}-${PV}" | ||
| 444 | git clean -d -f | ||
| 445 | fi | ||
| 446 | |||
| 447 | set -e | ||
| 448 | } | ||
| 449 | do_kernel_checkout[dirs] = "${S} ${UNPACKDIR}" | ||
| 450 | |||
| 451 | addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc | ||
| 452 | addtask kernel_metadata after do_validate_branches do_unpack before do_patch | ||
| 453 | do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot" | ||
| 454 | do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}" | ||
| 455 | do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot" | ||
| 456 | |||
| 457 | # ${S} doesn't exist for us at unpack | ||
| 458 | do_qa_unpack() { | ||
| 459 | return | ||
| 460 | } | ||
| 461 | |||
| 462 | do_kernel_configme[depends] += "virtual/cross-binutils:do_populate_sysroot" | ||
| 463 | do_kernel_configme[depends] += "virtual/cross-cc:do_populate_sysroot" | ||
| 464 | do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot" | ||
| 465 | do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot" | ||
| 466 | do_kernel_configme[dirs] += "${S} ${B}" | ||
| 467 | do_kernel_configme() { | ||
| 468 | do_kernel_metadata config | ||
| 469 | |||
| 470 | # translate the kconfig_mode into something that merge_config.sh | ||
| 471 | # understands | ||
| 472 | case ${KCONFIG_MODE} in | ||
| 473 | *allnoconfig) | ||
| 474 | config_flags="-n" | ||
| 475 | ;; | ||
| 476 | *alldefconfig) | ||
| 477 | config_flags="" | ||
| 478 | ;; | ||
| 479 | *) | ||
| 480 | if [ -f ${UNPACKDIR}/defconfig ]; then | ||
| 481 | config_flags="-n" | ||
| 482 | fi | ||
| 483 | ;; | ||
| 484 | esac | ||
| 485 | |||
| 486 | cd ${S} | ||
| 487 | |||
| 488 | meta_dir=$(kgit --meta) | ||
| 489 | configs="$(scc --configs -o ${meta_dir})" | ||
| 490 | if [ $? -ne 0 ]; then | ||
| 491 | bberror "${configs}" | ||
| 492 | bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)" | ||
| 493 | fi | ||
| 494 | |||
| 495 | CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1 | ||
| 496 | if [ $? -ne 0 -o ! -f ${B}/.config ]; then | ||
| 497 | bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}" | ||
| 498 | if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then | ||
| 499 | bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`" | ||
| 500 | else | ||
| 501 | bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log" | ||
| 502 | fi | ||
| 503 | fi | ||
| 504 | |||
| 505 | if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then | ||
| 506 | echo "# Global settings from linux recipe" >> ${B}/.config | ||
| 507 | echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config | ||
| 508 | fi | ||
| 509 | } | ||
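Because the case statement matches KCONFIG_MODE by suffix, both documented spellings work; for example, in a recipe or local.conf:

    # keep only explicitly requested options ("-n"), or let
    # merge_config.sh fill the remainder with Kconfig defaults
    KCONFIG_MODE = "--allnoconfig"
    #KCONFIG_MODE = "--alldefconfig"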
| 510 | |||
| 511 | addtask kernel_configme before do_configure after do_patch | ||
| 512 | addtask config_analysis | ||
| 513 | |||
| 514 | do_config_analysis[depends] = "virtual/kernel:do_configure" | ||
| 515 | do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot" | ||
| 516 | |||
| 517 | CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt" | ||
| 518 | CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt" | ||
| 519 | |||
| 520 | python do_config_analysis() { | ||
| 521 | import re, string, sys, subprocess | ||
| 522 | |||
| 523 | s = d.getVar('S') | ||
| 524 | |||
| 525 | env = os.environ.copy() | ||
| 526 | env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/") | ||
| 527 | env['LD'] = d.getVar('KERNEL_LD') | ||
| 528 | env['CC'] = d.getVar('KERNEL_CC') | ||
| 529 | env['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY') | ||
| 530 | env['STRIP'] = d.getVar('KERNEL_STRIP') | ||
| 531 | env['ARCH'] = d.getVar('ARCH') | ||
| 532 | env['srctree'] = s | ||
| 533 | |||
| 534 | # read specific symbols from the kernel recipe or from local.conf | ||
| 535 | # i.e.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION' | ||
| 536 | config = d.getVar( 'CONFIG_ANALYSIS' ) | ||
| 537 | if not config: | ||
| 538 | config = [ "" ] | ||
| 539 | else: | ||
| 540 | config = config.split() | ||
| 541 | |||
| 542 | for c in config: | ||
| 543 | for action in ["analysis","audit"]: | ||
| 544 | if action == "analysis": | ||
| 545 | try: | ||
| 546 | analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8') | ||
| 547 | except subprocess.CalledProcessError as e: | ||
| 548 | bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8'))) | ||
| 549 | |||
| 550 | outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' ) | ||
| 551 | |||
| 552 | if action == "audit": | ||
| 553 | try: | ||
| 554 | analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8') | ||
| 555 | except subprocess.CalledProcessError as e: | ||
| 556 | bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8'))) | ||
| 557 | |||
| 558 | outfile = d.getVar( 'CONFIG_AUDIT_FILE' ) | ||
| 559 | |||
| 560 | if c: | ||
| 561 | outdir = os.path.dirname( outfile ) | ||
| 562 | outname = os.path.basename( outfile ) | ||
| 563 | outfile = outdir + '/'+ c + '-' + outname | ||
| 564 | |||
| 565 | if config and os.path.isfile(outfile): | ||
| 566 | os.remove(outfile) | ||
| 567 | |||
| 568 | with open(outfile, 'w+') as f: | ||
| 569 | f.write( analysis ) | ||
| 570 | |||
| 571 | bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile )) | ||
| 572 | if c: | ||
| 573 | bb.warn( analysis ) | ||
| 574 | } | ||
| 575 | |||
| 576 | python do_kernel_configcheck() { | ||
| 577 | import re, string, sys, subprocess | ||
| 578 | |||
| 579 | audit_flag = d.getVar( "KMETA_AUDIT" ) | ||
| 580 | if not audit_flag: | ||
| 581 | bb.note( "kernel config audit disabled, skipping .." ) | ||
| 582 | return | ||
| 583 | |||
| 584 | s = d.getVar('S') | ||
| 585 | |||
| 586 | # if KMETA isn't set globally by a recipe using this routine, use kgit to | ||
| 587 | # locate or create the meta directory. Otherwise, kconf_check is not | ||
| 588 | # passed a valid meta-series for processing | ||
| 589 | kmeta = d.getVar("KMETA") | ||
| 590 | if not kmeta or not os.path.exists('{}/{}'.format(s,kmeta)): | ||
| 591 | kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip() | ||
| 592 | |||
| 593 | env = os.environ.copy() | ||
| 594 | env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/") | ||
| 595 | env['LD'] = d.getVar('KERNEL_LD') | ||
| 596 | env['CC'] = d.getVar('KERNEL_CC') | ||
| 597 | env['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY') | ||
| 598 | env['STRIP'] = d.getVar('KERNEL_STRIP') | ||
| 599 | env['ARCH'] = d.getVar('ARCH') | ||
| 600 | env['srctree'] = s | ||
| 601 | |||
| 602 | try: | ||
| 603 | configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8') | ||
| 604 | except subprocess.CalledProcessError as e: | ||
| 605 | bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") ) | ||
| 606 | |||
| 607 | config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0) | ||
| 608 | bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0) | ||
| 609 | kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or "" | ||
| 610 | warnings_detected = False | ||
| 611 | |||
| 612 | # if config check visibility is "1", that's the lowest level of audit. So | ||
| 613 | # we add the --classify option to the run, since classification will | ||
| 614 | # streamline the output to only report options that could be boot issues, | ||
| 615 | # or are otherwise required for proper operation. | ||
| 616 | extra_params = "" | ||
| 617 | if config_check_visibility == 1: | ||
| 618 | extra_params = "--classify" | ||
| 619 | |||
| 620 | # category #1: mismatches | ||
| 621 | try: | ||
| 622 | analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8') | ||
| 623 | except subprocess.CalledProcessError as e: | ||
| 624 | bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8'))) | ||
| 625 | |||
| 626 | if analysis: | ||
| 627 | outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta ) | ||
| 628 | if os.path.isfile(outfile): | ||
| 629 | os.remove(outfile) | ||
| 630 | with open(outfile, 'w+') as f: | ||
| 631 | f.write( analysis ) | ||
| 632 | |||
| 633 | if config_check_visibility and os.stat(outfile).st_size > 0: | ||
| 634 | with open (outfile, "r") as myfile: | ||
| 635 | results = myfile.read() | ||
| 636 | bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results) | ||
| 637 | warnings_detected = True | ||
| 638 | |||
| 639 | # category #2: invalid fragment elements | ||
| 640 | extra_params = "" | ||
| 641 | if bsp_check_visibility > 1: | ||
| 642 | extra_params = "--strict" | ||
| 643 | try: | ||
| 644 | analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8') | ||
| 645 | except subprocess.CalledProcessError as e: | ||
| 646 | bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8'))) | ||
| 647 | |||
| 648 | if analysis: | ||
| 649 | outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta) | ||
| 650 | if os.path.isfile(outfile): | ||
| 651 | os.remove(outfile) | ||
| 652 | with open(outfile, 'w+') as f: | ||
| 653 | f.write( analysis ) | ||
| 654 | |||
| 655 | if bsp_check_visibility and os.stat(outfile).st_size > 0: | ||
| 656 | with open (outfile, "r") as myfile: | ||
| 657 | results = myfile.read() | ||
| 658 | bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results) | ||
| 659 | warnings_detected = True | ||
| 660 | |||
| 661 | # category #3: redefined options (this is pretty verbose and is debug only) | ||
| 662 | try: | ||
| 663 | analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8') | ||
| 664 | except subprocess.CalledProcessError as e: | ||
| 665 | bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8'))) | ||
| 666 | |||
| 667 | if analysis: | ||
| 668 | outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta) | ||
| 669 | if os.path.isfile(outfile): | ||
| 670 | os.remove(outfile) | ||
| 671 | with open(outfile, 'w+') as f: | ||
| 672 | f.write( analysis ) | ||
| 673 | |||
| 674 | # if the audit level is greater than two, we report if a fragment has overridden | ||
| 675 | # a value from a base fragment. This is really only used for new kernel introduction | ||
| 676 | if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0: | ||
| 677 | with open (outfile, "r") as myfile: | ||
| 678 | results = myfile.read() | ||
| 679 | bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results) | ||
| 680 | warnings_detected = True | ||
| 681 | |||
| 682 | if warnings_detected and kmeta_audit_werror: | ||
| 683 | bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" ) | ||
| 684 | } | ||
| 685 | |||
| 686 | # Ensure that the branches (BSP and meta) are on the locations specified by | ||
| 687 | # their SRCREV values. If they are NOT on the right commits, the branches | ||
| 688 | # are corrected to the proper commit. | ||
| 689 | do_validate_branches() { | ||
| 690 | set +e | ||
| 691 | cd ${S} | ||
| 692 | |||
| 693 | machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" | ||
| 694 | machine_srcrev="${SRCREV_machine}" | ||
| 695 | |||
| 696 | # if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to | ||
| 697 | # check and we can exit early | ||
| 698 | if [ "${machine_srcrev}" = "AUTOINC" ]; then | ||
| 699 | linux_yocto_dev='${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "1", "", d)}' | ||
| 700 | if [ -n "$linux_yocto_dev" ]; then | ||
| 701 | git checkout -q -f ${machine_branch} | ||
| 702 | ver=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 703 | patchlevel=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 704 | sublevel=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 705 | kver="$ver.$patchlevel" | ||
| 706 | bbnote "dev kernel: performing version -> branch -> SRCREV validation" | ||
| 707 | bbnote "dev kernel: recipe version ${LINUX_VERSION}, src version: $kver" | ||
| 708 | echo "${LINUX_VERSION}" | grep -q $kver | ||
| 709 | if [ $? -ne 0 ]; then | ||
| 710 | version="$(echo ${LINUX_VERSION} | sed 's/\+.*$//g')" | ||
| 711 | versioned_branch="v$version/$machine_branch" | ||
| 712 | |||
| 713 | machine_branch=$versioned_branch | ||
| 714 | force_srcrev="$(git rev-parse $machine_branch 2> /dev/null)" | ||
| 715 | if [ $? -ne 0 ]; then | ||
| 716 | bbfatal "kernel version mismatch detected, and no valid branch $machine_branch detected" | ||
| 717 | fi | ||
| 718 | |||
| 719 | bbnote "dev kernel: adjusting branch to $machine_branch, srcrev to: $force_srcrev" | ||
| 720 | fi | ||
| 721 | else | ||
| 722 | bbnote "SRCREV validation is not required for AUTOREV" | ||
| 723 | fi | ||
| 724 | elif [ "${machine_srcrev}" = "" ]; then | ||
| 725 | if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then | ||
| 726 | # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe | ||
| 727 | # that doesn't use the SRCREV_FORMAT "meta_machine" is being built. In | ||
| 728 | # this case, we need to reset to the given SRCREV before heading to patching | ||
| 729 | bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}" | ||
| 730 | force_srcrev="${SRCREV}" | ||
| 731 | fi | ||
| 732 | else | ||
| 733 | git cat-file -t ${machine_srcrev} > /dev/null | ||
| 734 | if [ $? -ne 0 ]; then | ||
| 735 | bberror "${machine_srcrev} is not a valid commit ID." | ||
| 736 | bbfatal_log "The kernel source tree may be out of sync" | ||
| 737 | fi | ||
| 738 | force_srcrev=${machine_srcrev} | ||
| 739 | fi | ||
| 740 | |||
| 741 | git checkout -q -f ${machine_branch} | ||
| 742 | if [ -n "${force_srcrev}" ]; then | ||
| 743 | # see if the branch we are about to patch has been properly reset to the defined | ||
| 744 | # SRCREV; if not, we reset it. | ||
| 745 | branch_head=`git rev-parse HEAD` | ||
| 746 | if [ "${force_srcrev}" != "${branch_head}" ]; then | ||
| 747 | current_branch=`git rev-parse --abbrev-ref HEAD` | ||
| 748 | git branch "$current_branch-orig" | ||
| 749 | git reset --hard ${force_srcrev} | ||
| 750 | # We've checked out HEAD; make sure we clean up the kgit-s2q fence-post check | ||
| 751 | # so the patches are applied as expected, otherwise no patching | ||
| 752 | # would be done in some corner cases. | ||
| 753 | kgit-s2q --clean | ||
| 754 | fi | ||
| 755 | fi | ||
| 756 | |||
| 757 | set -e | ||
| 758 | } | ||
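With the SRCREV_FORMAT = "meta_machine" default from the top of this class, a linux-yocto style recipe pins both revisions that this task validates; the hashes below are placeholders:

    SRCREV_machine = "1234567890123456789012345678901234567890"
    SRCREV_meta = "abcdefabcdefabcdefabcdefabcdefabcdefabcd"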
| 759 | |||
| 760 | OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT" | ||
| 761 | KBUILD_OUTPUT = "${B}" | ||
| 762 | |||
| 763 | python () { | ||
| 764 | # If diffconfig is available, ensure it runs after kernel_configme | ||
| 765 | if 'do_diffconfig' in d: | ||
| 766 | bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d) | ||
| 767 | |||
| 768 | externalsrc = d.getVar('EXTERNALSRC') | ||
| 769 | if externalsrc: | ||
| 770 | # If we deltask do_patch, do_kernel_configme is left without | ||
| 771 | # dependencies and runs too early | ||
| 772 | d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack']) | ||
| 773 | } | ||
| 774 | |||
| 775 | # extra tasks | ||
| 776 | addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile | ||
| 777 | addtask validate_branches before do_patch after do_kernel_checkout | ||
| 778 | addtask kernel_configcheck after do_configure before do_compile | ||
diff --git a/meta/classes-recipe/kernel.bbclass b/meta/classes-recipe/kernel.bbclass deleted file mode 100644 index 003a155e79..0000000000 --- a/meta/classes-recipe/kernel.bbclass +++ /dev/null | |||
| @@ -1,868 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit linux-kernel-base kernel-module-split features_check | ||
| 8 | |||
| 9 | COMPATIBLE_HOST = ".*-linux" | ||
| 10 | |||
| 11 | # Linux has minimum ISA requirements on riscv, see arch/riscv/Makefile | ||
| 12 | REQUIRED_TUNE_FEATURES:riscv32 = "rv 32 i m a zicsr zifencei" | ||
| 13 | REQUIRED_TUNE_FEATURES:riscv64 = "rv 64 i m a zicsr zifencei" | ||
| 14 | |||
| 15 | KERNEL_PACKAGE_NAME ??= "kernel" | ||
| 16 | KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }" | ||
| 17 | |||
| 18 | PROVIDES += "virtual/kernel" | ||
| 19 | DEPENDS += "virtual/cross-binutils virtual/cross-cc kmod-native bc-native bison-native" | ||
| 20 | DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}" | ||
| 21 | DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}" | ||
| 22 | DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}" | ||
| 23 | DEPENDS += "${@bb.utils.contains("KERNEL_IMAGETYPES", "Image.lz4", "lz4-native", "", d)}" | ||
| 24 | PACKAGE_WRITE_DEPS += "depmodwrapper-cross" | ||
| 25 | |||
| 26 | do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot" | ||
| 27 | do_clean[depends] += "make-mod-scripts:do_clean" | ||
| 28 | |||
| 29 | # CPE entries from NVD use linux_kernel, but the raw CVE entries from the kernel CNA have | ||
| 30 | # vendor: linux and product: linux. Note that multiple distributions use "linux" as a product | ||
| 31 | # name, so we need to fill in the vendor to avoid false positives | ||
| 32 | CVE_PRODUCT ?= "linux_kernel linux:linux" | ||
| 33 | |||
| 34 | S = "${STAGING_KERNEL_DIR}" | ||
| 35 | B = "${WORKDIR}/build" | ||
| 36 | KBUILD_OUTPUT = "${B}" | ||
| 37 | OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT" | ||
| 38 | |||
| 39 | # we include gcc above, so we don't need virtual/libc | ||
| 40 | INHIBIT_DEFAULT_DEPS = "1" | ||
| 41 | |||
| 42 | KERNEL_IMAGETYPE ?= "zImage" | ||
| 43 | INITRAMFS_IMAGE ?= "" | ||
| 44 | INITRAMFS_TASK ?= "" | ||
| 45 | INITRAMFS_IMAGE_BUNDLE ?= "" | ||
| 46 | INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}" | ||
| 47 | INITRAMFS_MULTICONFIG ?= "" | ||
| 48 | |||
| 49 | # KERNEL_VERSION is extracted from source code. It is evaluated as | ||
| 50 | # None for the first parsing, since the code has not been fetched. | ||
| 51 | # After the code is fetched, it will be evaluated as real version | ||
| 52 | # number and cause kernel to be rebuilt. To avoid this, make | ||
| 53 | # KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on | ||
| 54 | # LINUX_VERSION which is a constant. | ||
| 55 | KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}" | ||
| 56 | KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}" | ||
| 57 | KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}" | ||
| 58 | KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}" | ||
| 59 | |||
| 60 | python __anonymous () { | ||
| 61 | pn = d.getVar("PN") | ||
| 62 | kpn = d.getVar("KERNEL_PACKAGE_NAME") | ||
| 63 | |||
| 64 | # XXX Remove this after bug 11905 is resolved | ||
| 65 | # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly | ||
| 66 | if kpn == pn: | ||
| 67 | bb.warn("Some packages (E.g. *-dev) might be missing due to " | ||
| 68 | "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)") | ||
| 69 | |||
| 70 | # The default kernel recipe builds in a shared location defined by | ||
| 71 | # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR. | ||
| 72 | # Set these variables to directories under ${WORKDIR} in alternate | ||
| 73 | # kernel recipes (i.e. where KERNEL_PACKAGE_NAME != kernel) so that they | ||
| 74 | # may build in parallel with the default kernel without clobbering. | ||
| 75 | if kpn != "kernel": | ||
| 76 | workdir = d.getVar("WORKDIR") | ||
| 77 | sourceDir = os.path.join(workdir, 'kernel-source') | ||
| 78 | artifactsDir = os.path.join(workdir, 'kernel-build-artifacts') | ||
| 79 | d.setVar("STAGING_KERNEL_DIR", sourceDir) | ||
| 80 | d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir) | ||
| 81 | |||
| 82 | # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES | ||
| 83 | type = d.getVar('KERNEL_IMAGETYPE') or "" | ||
| 84 | alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or "" | ||
| 85 | types = d.getVar('KERNEL_IMAGETYPES') or "" | ||
| 86 | if type not in types.split(): | ||
| 87 | types = (type + ' ' + types).strip() | ||
| 88 | if alttype not in types.split(): | ||
| 89 | types = (alttype + ' ' + types).strip() | ||
| 90 | d.setVar('KERNEL_IMAGETYPES', types) | ||
| 91 | |||
| 92 | # Since kernel-fitimage.bbclass got replaced by kernel-fit-image.bbclass | ||
| 93 | if "fitImage" in types: | ||
| 94 | bb.error("fitImage is no longer supported as a KERNEL_IMAGETYPE(S). FIT images are built by the linux-yocto-fitimage recipe.") | ||
| 95 | |||
| 96 | # KERNEL_IMAGETYPES may contain a mixture of image types supported directly | ||
| 97 | # by the kernel build system and types which are created by post-processing | ||
| 98 | # the output of the kernel build system (e.g. compressing vmlinux -> | ||
| 99 | # vmlinux.gz in kernel_do_transform_kernel()). | ||
| 100 | # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported | ||
| 101 | # directly by the kernel build system. | ||
| 102 | if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'): | ||
| 103 | typeformake = set() | ||
| 104 | for type in types.split(): | ||
| 105 | if type == 'vmlinux.gz': | ||
| 106 | type = 'vmlinux' | ||
| 107 | typeformake.add(type) | ||
| 108 | |||
| 109 | d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake))) | ||
| 110 | |||
| 111 | kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel" | ||
| 112 | imagedest = d.getVar('KERNEL_IMAGEDEST') | ||
| 113 | |||
| 114 | for type in types.split(): | ||
| 115 | if bb.data.inherits_class('nopackages', d): | ||
| 116 | continue | ||
| 117 | typelower = type.lower() | ||
| 118 | d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower)) | ||
| 119 | d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type) | ||
| 120 | d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower)) | ||
| 121 | splitmods = d.getVar("KERNEL_SPLIT_MODULES") | ||
| 122 | if splitmods != '1': | ||
| 123 | d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname) | ||
| 124 | d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname) | ||
| 125 | d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname) | ||
| 126 | d.appendVar('RPROVIDES:%s-modules' % kname, ' %s-modules-${KERNEL_VERSION_PKG_NAME}' % kname) | ||
| 127 | |||
| 128 | d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower)) | ||
| 129 | d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1') | ||
| 130 | |||
| 131 | if d.getVar('KERNEL_IMAGETYPE_SYMLINK') == '1': | ||
| 132 | d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e | ||
| 133 | if [ -n "$D" ]; then | ||
| 134 | ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 | ||
| 135 | else | ||
| 136 | ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 | ||
| 137 | if [ $? -ne 0 ]; then | ||
| 138 | echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)." | ||
| 139 | install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s | ||
| 140 | fi | ||
| 141 | fi | ||
| 142 | set -e | ||
| 143 | """ % (type, type, type, type, type, type, type)) | ||
| 144 | d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e | ||
| 145 | if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then | ||
| 146 | rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 | ||
| 147 | fi | ||
| 148 | set -e | ||
| 149 | """ % (type, type, type)) | ||
| 150 | |||
| 151 | |||
| 152 | image = d.getVar('INITRAMFS_IMAGE') | ||
| 153 | # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0, | ||
| 154 | # the do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built | ||
| 155 | # standalone for use by wic and other tools. | ||
| 156 | if image: | ||
| 157 | if d.getVar('INITRAMFS_MULTICONFIG'): | ||
| 158 | d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc:${BB_CURRENT_MC}:${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete') | ||
| 159 | else: | ||
| 160 | d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete') | ||
| 161 | if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')): | ||
| 162 | bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d) | ||
| 163 | |||
| 164 | # NOTE: setting INITRAMFS_TASK is for backward compatibility | ||
| 165 | # The preferred method is to set INITRAMFS_IMAGE, because | ||
| 166 | # this INITRAMFS_TASK has circular dependency problems | ||
| 167 | # if the initramfs requires kernel modules | ||
| 168 | image_task = d.getVar('INITRAMFS_TASK') | ||
| 169 | if image_task: | ||
| 170 | d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}') | ||
| 171 | } | ||
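As a sketch of the alternate-kernel handling above (the recipe name is hypothetical), a second kernel can be built alongside the default provider:

    # linux-alt.bb
    KERNEL_PACKAGE_NAME = "kernel-alt"
    # STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR are then redirected
    # under ${WORKDIR} by the anonymous python above, so the two kernels
    # do not clobber each other's shared work directories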
| 172 | |||
| 173 | # Here we pull in all various kernel image types which we support. | ||
| 174 | # | ||
| 175 | # In case you're wondering why kernel.bbclass inherits the other image | ||
| 176 | # types instead of the other way around, the reason for that is to | ||
| 177 | # maintain compatibility with various currently existing meta-layers. | ||
| 178 | # By pulling in the various kernel image types here, we retain the | ||
| 179 | # original behavior of kernel.bbclass, so no meta-layers should get | ||
| 180 | # broken. | ||
| 181 | # | ||
| 182 | # KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this | ||
| 183 | # used to be the default behavior when only uImage was supported. This | ||
| 184 | # variable can be appended by users who implement support for new kernel | ||
| 185 | # image types. | ||
| 186 | |||
| 187 | KERNEL_CLASSES ?= " kernel-uimage " | ||
| 188 | inherit_defer ${KERNEL_CLASSES} | ||
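Layers implementing additional image types extend this list rather than replacing it; the class name below is hypothetical:

    KERNEL_CLASSES:append = " kernel-mylayer-imagetype"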
| 189 | |||
| 190 | # Old style kernels may set ${S} = ${WORKDIR}/git, for example. | ||
| 191 | # We need to move these over to STAGING_KERNEL_DIR. We can't just | ||
| 192 | # create the symlink in advance as the git fetcher can't cope with | ||
| 193 | # the symlink. | ||
| 194 | do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}" | ||
| 195 | do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}" | ||
| 196 | python do_symlink_kernsrc () { | ||
| 197 | s = d.getVar("S") | ||
| 198 | kernsrc = d.getVar("STAGING_KERNEL_DIR") | ||
| 199 | if s != kernsrc: | ||
| 200 | bb.utils.mkdirhier(kernsrc) | ||
| 201 | bb.utils.remove(kernsrc, recurse=True) | ||
| 202 | if s[-1] == '/': | ||
| 203 | # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as | ||
| 204 | # directory name and fail | ||
| 205 | s = s[:-1] | ||
| 206 | if d.getVar("EXTERNALSRC"): | ||
| 207 | # With EXTERNALSRC S will not be wiped so we can symlink to it | ||
| 208 | os.symlink(s, kernsrc) | ||
| 209 | else: | ||
| 210 | import shutil | ||
| 211 | shutil.move(s, kernsrc) | ||
| 212 | os.symlink(kernsrc, s) | ||
| 213 | } | ||
| 214 | # do_patch is normally ordered before do_configure, but | ||
| 215 | # externalsrc.bbclass deletes do_patch, breaking the dependency of | ||
| 216 | # do_configure on do_symlink_kernsrc. | ||
| 217 | addtask symlink_kernsrc before do_patch do_configure after do_unpack | ||
| 218 | |||
| 219 | inherit kernel-arch deploy | ||
| 220 | |||
| 221 | PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*" | ||
| 222 | PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*" | ||
| 223 | PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*" | ||
| 224 | |||
| 225 | export OS = "${TARGET_OS}" | ||
| 226 | export CROSS_COMPILE = "${TARGET_PREFIX}" | ||
| 227 | |||
| 228 | KERNEL_RELEASE ?= "${KERNEL_VERSION}" | ||
| 229 | |||
| 230 | # The directory where the built kernel lies in the kernel tree | ||
| 231 | KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot" | ||
| 232 | KERNEL_IMAGEDEST ?= "boot" | ||
| 233 | KERNEL_DTBDEST ?= "${KERNEL_IMAGEDEST}" | ||
| 234 | KERNEL_DTBVENDORED ?= "0" | ||
| 235 | |||
| 236 | # | ||
| 237 | # configuration | ||
| 238 | # | ||
| 239 | KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}" | ||
| 240 | |||
| 241 | # kernels are generally machine specific | ||
| 242 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 243 | |||
| 244 | # U-Boot support | ||
| 245 | UBOOT_ENTRYPOINT ?= "0x20008000" | ||
| 246 | UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" | ||
| 247 | |||
| 248 | # Some Linux kernel configurations need additional parameters on the command line | ||
| 249 | KERNEL_EXTRA_ARGS ?= "" | ||
| 250 | |||
| 251 | EXTRA_OEMAKE += ' CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}"' | ||
| 252 | EXTRA_OEMAKE += ' HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"' | ||
| 253 | EXTRA_OEMAKE += ' HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}"' | ||
| 254 | # Only for newer kernels (5.19+); for older kernels, the native pkg-config variables are set when building the kernel and modules | ||
| 255 | EXTRA_OEMAKE += ' HOSTPKG_CONFIG="pkg-config-native"' | ||
| 256 | |||
| 257 | KERNEL_ALT_IMAGETYPE ??= "" | ||
| 258 | |||
| 259 | copy_initramfs() { | ||
| 260 | echo "Copying initramfs into ./usr ..." | ||
| 261 | # In case the directory is not created yet from the first pass compile: | ||
| 262 | mkdir -p ${B}/usr | ||
| 263 | # Find and use the first initramfs image archive type we find | ||
| 264 | rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio | ||
| 265 | for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do | ||
| 266 | if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then | ||
| 267 | cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/. | ||
| 268 | case $img in | ||
| 269 | *gz) | ||
| 270 | echo "gzip decompressing image" | ||
| 271 | gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img | ||
| 272 | break | ||
| 273 | ;; | ||
| 274 | *lz4) | ||
| 275 | echo "lz4 decompressing image" | ||
| 276 | lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio | ||
| 277 | break | ||
| 278 | ;; | ||
| 279 | *lzo) | ||
| 280 | echo "lzo decompressing image" | ||
| 281 | lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img | ||
| 282 | break | ||
| 283 | ;; | ||
| 284 | *lzma) | ||
| 285 | echo "lzma decompressing image" | ||
| 286 | lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img | ||
| 287 | break | ||
| 288 | ;; | ||
| 289 | *xz) | ||
| 290 | echo "xz decompressing image" | ||
| 291 | xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img | ||
| 292 | break | ||
| 293 | ;; | ||
| 294 | *zst) | ||
| 295 | echo "zst decompressing image" | ||
| 296 | zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img | ||
| 297 | break | ||
| 298 | ;; | ||
| 299 | esac | ||
| 300 | break | ||
| 301 | fi | ||
| 302 | done | ||
| 303 | # Verify that the above loop found an initramfs, fail otherwise | ||
| 304 | [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong." | ||
| 305 | } | ||
| 306 | |||
| 307 | do_bundle_initramfs () { | ||
| 308 | if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then | ||
| 309 | echo "Creating a kernel image with a bundled initramfs..." | ||
| 310 | copy_initramfs | ||
| 311 | # Backing up the kernel image depends on its type (regular file or symbolic link) | ||
| 312 | tmp_path="" | ||
| 313 | for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do | ||
| 314 | if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then | ||
| 315 | linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType` | ||
| 316 | realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType` | ||
| 317 | mv -f $realpath $realpath.bak | ||
| 318 | tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath | ||
| 319 | elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then | ||
| 320 | mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak | ||
| 321 | tmp_path=$tmp_path" "$imageType"##" | ||
| 322 | fi | ||
| 323 | done | ||
| 324 | use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio | ||
| 325 | kernel_do_compile | ||
| 326 | # Restoring kernel image | ||
| 327 | for tp in $tmp_path ; do | ||
| 328 | imageType=`echo $tp|cut -d "#" -f 1` | ||
| 329 | linkpath=`echo $tp|cut -d "#" -f 2` | ||
| 330 | realpath=`echo $tp|cut -d "#" -f 3` | ||
| 331 | if [ -n "$realpath" ]; then | ||
| 332 | mv -f $realpath $realpath.initramfs | ||
| 333 | mv -f $realpath.bak $realpath | ||
| 334 | ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs | ||
| 335 | else | ||
| 336 | mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs | ||
| 337 | mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType | ||
| 338 | fi | ||
| 339 | done | ||
| 340 | fi | ||
| 341 | } | ||
| 342 | do_bundle_initramfs[dirs] = "${B}" | ||
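Bundling is driven by two variables; a typical configuration (using the initramfs image recipe shipped in oe-core) is:

    # local.conf
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"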
| 343 | |||
| 344 | kernel_do_transform_bundled_initramfs() { | ||
| 345 | # vmlinux.gz is not built by kernel | ||
| 346 | if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then | ||
| 347 | gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs | ||
| 348 | fi | ||
| 349 | } | ||
| 350 | do_transform_bundled_initramfs[dirs] = "${B}" | ||
| 351 | |||
| 352 | python do_package:prepend () { | ||
| 353 | d.setVar('STRIP', d.getVar('KERNEL_STRIP').strip()) | ||
| 354 | } | ||
| 355 | |||
| 356 | python do_devshell:prepend () { | ||
| 357 | os.environ["LDFLAGS"] = '' | ||
| 358 | } | ||
| 359 | |||
| 360 | addtask bundle_initramfs after do_install before do_deploy | ||
| 361 | |||
| 362 | KERNEL_DEBUG_TIMESTAMPS ??= "0" | ||
| 363 | |||
| 364 | kernel_do_compile() { | ||
| 365 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE | ||
| 366 | |||
| 367 | # set up native pkg-config variables (kconfig scripts call pkg-config directly, so it cannot generically be overridden to pkg-config-native) | ||
| 368 | export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig" | ||
| 369 | export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig" | ||
| 370 | export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR" | ||
| 371 | export PKG_CONFIG_SYSROOT_DIR="" | ||
| 372 | |||
| 373 | if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then | ||
| 374 | # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not | ||
| 375 | # be set.... | ||
| 376 | if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then | ||
| 377 | # The source directory is not necessarily a git repository, so we | ||
| 378 | # specify the git-dir to ensure that git does not query a | ||
| 379 | # repository in any parent directory. | ||
| 380 | SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"` | ||
| 381 | fi | ||
| 382 | |||
| 383 | ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH` | ||
| 384 | export KBUILD_BUILD_TIMESTAMP="$ts" | ||
| 385 | export KCONFIG_NOTIMESTAMP=1 | ||
| 386 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | ||
| 387 | else | ||
| 388 | ts=`LC_ALL=C date` | ||
| 389 | export KBUILD_BUILD_TIMESTAMP="$ts" | ||
| 390 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | ||
| 391 | fi | ||
| 392 | # The $use_alternate_initrd is only set from | ||
| 393 | # do_bundle_initramfs(). This variable is specifically for the | ||
| 394 | # case where we are making a second pass at the kernel | ||
| 395 | # compilation and we want to force the kernel build to use a | ||
| 396 | # different initramfs image. The way to do that in the kernel | ||
| 397 | # is to specify: | ||
| 398 | # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio | ||
| 399 | if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then | ||
| 400 | # The old-style way of copying a prebuilt image and building it | ||
| 401 | # is turned on via INITRAMFS_TASK != "" | ||
| 402 | copy_initramfs | ||
| 403 | use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio | ||
| 404 | fi | ||
| 405 | for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do | ||
| 406 | oe_runmake ${PARALLEL_MAKE} ${typeformake} ${KERNEL_EXTRA_ARGS} $use_alternate_initrd | ||
| 407 | done | ||
| 408 | } | ||
| 409 | |||
| 410 | kernel_do_transform_kernel() { | ||
| 411 | # vmlinux.gz is not built by kernel | ||
| 412 | if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then | ||
| 413 | mkdir -p "${KERNEL_OUTPUT_DIR}" | ||
| 414 | gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz" | ||
| 415 | fi | ||
| 416 | } | ||
| 417 | do_transform_kernel[dirs] = "${B}" | ||
| 418 | addtask transform_kernel after do_compile before do_install | ||
| 419 | |||
| 420 | do_compile_kernelmodules() { | ||
| 421 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE | ||
| 422 | |||
| 423 | # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native) | ||
| 424 | export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig" | ||
| 425 | export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig" | ||
| 426 | export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR" | ||
| 427 | export PKG_CONFIG_SYSROOT_DIR="" | ||
| 428 | |||
| 429 | if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then | ||
| 430 | # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not | ||
| 431 | # be set.... | ||
| 432 | if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then | ||
| 433 | # The source directory is not necessarily a git repository, so we | ||
| 434 | # specify the git-dir to ensure that git does not query a | ||
| 435 | # repository in any parent directory. | ||
| 436 | SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"` | ||
| 437 | fi | ||
| 438 | |||
| 439 | ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH` | ||
| 440 | export KBUILD_BUILD_TIMESTAMP="$ts" | ||
| 441 | export KCONFIG_NOTIMESTAMP=1 | ||
| 442 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | ||
| 443 | else | ||
| 444 | ts=`LC_ALL=C date` | ||
| 445 | export KBUILD_BUILD_TIMESTAMP="$ts" | ||
| 446 | bbnote "KBUILD_BUILD_TIMESTAMP: $ts" | ||
| 447 | fi | ||
| 448 | if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then | ||
| 449 | oe_runmake -C ${B} ${PARALLEL_MAKE} modules ${KERNEL_EXTRA_ARGS} | ||
| 450 | |||
| 451 | # Module.symvers gets updated during the | ||
| 452 | # building of the kernel modules. We need to | ||
| 453 | # update this in the shared workdir since some | ||
| 454 | # external kernel modules have a dependency on | ||
| 455 | # other kernel modules and will look at this | ||
| 456 | # file to do symbol lookups | ||
| 457 | cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/ | ||
| 458 | # 5.10+ kernels have module.lds that we need to copy for external module builds | ||
| 459 | if [ -e "${B}/scripts/module.lds" ]; then | ||
| 460 | install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds | ||
| 461 | fi | ||
| 462 | else | ||
| 463 | bbnote "no modules to compile" | ||
| 464 | fi | ||
| 465 | } | ||
| 466 | addtask compile_kernelmodules after do_compile before do_strip | ||
| 467 | |||
| 468 | kernel_do_install() { | ||
| 469 | # | ||
| 470 | # First install the modules | ||
| 471 | # | ||
| 472 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE | ||
| 473 | if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then | ||
| 474 | oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install | ||
| 475 | rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build" | ||
| 476 | rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source" | ||
| 477 | # Remove empty module directories to prevent QA issues | ||
| 478 | [ -d "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" ] && find "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" -type d -empty -delete | ||
| 479 | else | ||
| 480 | bbnote "no modules to install" | ||
| 481 | fi | ||
| 482 | |||
| 483 | # | ||
| 484 | # Install various kernel output (zImage, map file, config, module support files) | ||
| 485 | # | ||
| 486 | install -d ${D}/${KERNEL_IMAGEDEST} | ||
| 487 | |||
| 488 | # | ||
| 489 | # bundle_initramfs runs after do_install and before do_deploy, so do_deploy handles what's needed. | ||
| 490 | # | ||
| 491 | for imageType in ${KERNEL_IMAGETYPES} ; do | ||
| 492 | if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then | ||
| 493 | install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION} | ||
| 494 | fi | ||
| 495 | done | ||
| 496 | |||
| 497 | install -m 0644 System.map ${D}/${KERNEL_IMAGEDEST}/System.map-${KERNEL_VERSION} | ||
| 498 | install -m 0644 .config ${D}/${KERNEL_IMAGEDEST}/config-${KERNEL_VERSION} | ||
| 499 | install -m 0644 vmlinux ${D}/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION} | ||
| 500 | ! [ -e Module.symvers ] || install -m 0644 Module.symvers ${D}/${KERNEL_IMAGEDEST}/Module.symvers-${KERNEL_VERSION} | ||
| 501 | } | ||
| 502 | |||
| 503 | # Must not run earlier than do_kernel_checkout, or else the Makefile won't be in ${S}/Makefile | ||
| 504 | do_kernel_version_sanity_check() { | ||
| 505 | if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then | ||
| 506 | exit 0 | ||
| 507 | fi | ||
| 508 | |||
| 509 | # The Makefile determines the kernel version shown at runtime | ||
| 510 | # Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile | ||
| 511 | VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 512 | PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 513 | SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 514 | EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//) | ||
| 515 | |||
| 516 | # Build a string for regex and a plain version string | ||
| 517 | reg="^${VERSION}\.${PATCHLEVEL}" | ||
| 518 | vers="${VERSION}.${PATCHLEVEL}" | ||
| 519 | if [ -n "${SUBLEVEL}" ]; then | ||
| 520 | # Ignoring a SUBLEVEL of zero is fine | ||
| 521 | if [ "${SUBLEVEL}" = "0" ]; then | ||
| 522 | reg="${reg}(\.${SUBLEVEL})?" | ||
| 523 | else | ||
| 524 | reg="${reg}\.${SUBLEVEL}" | ||
| 525 | vers="${vers}.${SUBLEVEL}" | ||
| 526 | fi | ||
| 527 | fi | ||
| 528 | vers="${vers}${EXTRAVERSION}" | ||
| 529 | reg="${reg}${EXTRAVERSION}" | ||
| 530 | |||
| 531 | if [ -z `echo ${PV} | grep -E "${reg}"` ]; then | ||
| 532 | bbfatal "Package Version (${PV}) does not match of kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe." | ||
| 533 | fi | ||
| 534 | exit 0 | ||
| 535 | } | ||
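To make the check above concrete, here is a hedged sketch: with a kernel Makefile carrying VERSION = 6, PATCHLEVEL = 6, SUBLEVEL = 23 and an empty EXTRAVERSION (assumed example values), the regex becomes ^6\.6\.23, so the recipe needs a matching PV:

    # recipe sketch -- version values are assumed examples
    PV = "6.6.23"
    # or opt out of the check entirely:
    KERNEL_VERSION_SANITY_SKIP = "1"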
| 536 | |||
| 537 | addtask shared_workdir after do_compile before do_compile_kernelmodules | ||
| 538 | addtask shared_workdir_setscene | ||
| 539 | |||
| 540 | do_shared_workdir_setscene () { | ||
| 541 | exit 1 | ||
| 542 | } | ||
| 543 | |||
| 544 | emit_depmod_pkgdata() { | ||
| 545 | # Stash data for depmod | ||
| 546 | install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/ | ||
| 547 | echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion | ||
| 548 | cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION} | ||
| 549 | } | ||
| 550 | |||
| 551 | PACKAGEFUNCS += "emit_depmod_pkgdata" | ||
| 552 | |||
| 553 | do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}" | ||
| 554 | do_shared_workdir () { | ||
| 555 | cd ${B} | ||
| 556 | |||
| 557 | kerneldir=${STAGING_KERNEL_BUILDDIR} | ||
| 558 | install -d $kerneldir | ||
| 559 | |||
| 560 | # | ||
| 561 | # Store the kernel version in sysroots for module-base.bbclass | ||
| 562 | # | ||
| 563 | |||
| 564 | echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion | ||
| 565 | echo "${KERNEL_LOCALVERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-localversion | ||
| 566 | |||
| 567 | # Copy files required for module builds | ||
| 568 | cp System.map $kerneldir/System.map-${KERNEL_VERSION} | ||
| 569 | ! [ -e Module.symvers ] || cp Module.symvers $kerneldir/ | ||
| 570 | cp .config $kerneldir/ | ||
| 571 | mkdir -p $kerneldir/include/config | ||
| 572 | cp include/config/kernel.release $kerneldir/include/config/kernel.release | ||
| 573 | if [ -e certs/signing_key.x509 ]; then | ||
| 574 | # The signing_key.* files are stored in the certs/ dir in | ||
| 575 | # newer Linux kernels | ||
| 576 | mkdir -p $kerneldir/certs | ||
| 577 | cp certs/signing_key.* $kerneldir/certs/ | ||
| 578 | elif [ -e signing_key.priv ]; then | ||
| 579 | cp signing_key.* $kerneldir/ | ||
| 580 | fi | ||
| 581 | |||
| 582 | # We can also copy over all the generated files and avoid special cases | ||
| 583 | # like version.h, but we've opted to keep this small until file creep starts | ||
| 584 | # to happen | ||
| 585 | if [ -e include/linux/version.h ]; then | ||
| 586 | mkdir -p $kerneldir/include/linux | ||
| 587 | cp include/linux/version.h $kerneldir/include/linux/version.h | ||
| 588 | fi | ||
| 589 | |||
| 590 | # As of Linux kernel version 3.0.1, the clean target removes | ||
| 591 | # arch/powerpc/lib/crtsavres.o, which is listed in | ||
| 592 | # KBUILD_LDFLAGS_MODULE and required to build external modules. | ||
| 593 | if [ ${ARCH} = "powerpc" ]; then | ||
| 594 | if [ -e arch/powerpc/lib/crtsavres.o ]; then | ||
| 595 | mkdir -p $kerneldir/arch/powerpc/lib/ | ||
| 596 | cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o | ||
| 597 | fi | ||
| 598 | fi | ||
| 599 | |||
| 600 | if [ -d include/generated ]; then | ||
| 601 | mkdir -p $kerneldir/include/generated/ | ||
| 602 | cp -fR include/generated/* $kerneldir/include/generated/ | ||
| 603 | fi | ||
| 604 | |||
| 605 | if [ -d arch/${ARCH}/include/generated ]; then | ||
| 606 | mkdir -p $kerneldir/arch/${ARCH}/include/generated/ | ||
| 607 | cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/ | ||
| 608 | fi | ||
| 609 | |||
| 610 | if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then | ||
| 611 | # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for | ||
| 612 | # out-of-tree modules to be able to generate object files. | ||
| 613 | if [ -x tools/objtool/objtool ]; then | ||
| 614 | mkdir -p ${kerneldir}/tools/objtool | ||
| 615 | cp tools/objtool/objtool ${kerneldir}/tools/objtool/ | ||
| 616 | fi | ||
| 617 | fi | ||
| 618 | |||
| 619 | # When building with CONFIG_MODVERSIONS=y and CONFIG_RANDSTRUCT=y we need | ||
| 620 | # to copy the build assets generated for the randstruct seed to | ||
| 621 | # STAGING_KERNEL_BUILDDIR, otherwise the out-of-tree modules build will | ||
| 622 | # generate those assets which will result in a different | ||
| 623 | # RANDSTRUCT_HASHED_SEED | ||
| 624 | if [ -d scripts/basic ]; then | ||
| 625 | mkdir -p ${kerneldir}/scripts | ||
| 626 | cp -r scripts/basic ${kerneldir}/scripts | ||
| 627 | fi | ||
| 628 | |||
| 629 | if [ -d scripts/gcc-plugins ]; then | ||
| 630 | mkdir -p ${kerneldir}/scripts | ||
| 631 | cp -r scripts/gcc-plugins ${kerneldir}/scripts | ||
| 632 | fi | ||
| 633 | |||
| 634 | } | ||
| 635 | |||
| 636 | # We don't need to stage anything; in particular not the modules/firmware, since those would clash with linux-firmware | ||
| 637 | SYSROOT_DIRS = "" | ||
| 638 | |||
| 639 | KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} olddefconfig || oe_runmake -C ${S} O=${B} oldnoconfig" | ||
| 640 | |||
| 641 | python check_oldest_kernel() { | ||
| 642 | oldest_kernel = d.getVar('OLDEST_KERNEL') | ||
| 643 | kernel_version = d.getVar('KERNEL_VERSION') | ||
| 644 | tclibc = d.getVar('TCLIBC') | ||
| 645 | if tclibc == 'glibc': | ||
| 646 | kernel_version = kernel_version.split('-', 1)[0] | ||
| 647 | if oldest_kernel and kernel_version: | ||
| 648 | if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0: | ||
| 649 | bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc)) | ||
| 650 | } | ||
| 651 | |||
| 652 | check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION" | ||
| 653 | do_compile[postfuncs] += "check_oldest_kernel" | ||
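OLDEST_KERNEL normally comes from the distro configuration; the warning above fires when the kernel being built is older than what libc was configured for. A sketch of how a BSP might lower it (the value is an assumption):

    # distro/machine conf sketch -- the version is an assumed example
    OLDEST_KERNEL = "5.4"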
| 654 | |||
| 655 | KERNEL_LOCALVERSION ??= "" | ||
| 656 | |||
| 657 | # Kernel 6.3+ requires the variable LOCALVERSION to be set to avoid getting a "+" in | ||
| 658 | # the local version. Having it empty means nothing will be added, and any | ||
| 659 | # value will be appended to the local kernel version. This replaces the | ||
| 660 | # use of the .scmversion file for setting a localversion without using | ||
| 661 | # the CONFIG_LOCALVERSION option. | ||
| 662 | # | ||
| 663 | # Note: This class saves the value of localversion to a file | ||
| 664 | # so other recipes like make-mod-scripts can restore it via the | ||
| 665 | # helper function get_kernellocalversion_file | ||
| 666 | export LOCALVERSION = "${KERNEL_LOCALVERSION}" | ||
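As a hedged illustration of the mechanism described above (the suffix and resulting version are assumed examples; linux-yocto recipes use a similar pattern):

    # kernel recipe sketch -- the suffix is an assumed example
    KERNEL_LOCALVERSION = "-mybsp"
    # yields e.g. /lib/modules/6.6.23-mybsp without needing CONFIG_LOCALVERSION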
| 667 | |||
| 668 | kernel_do_configure() { | ||
| 669 | # fixes extra + in /lib/modules/2.6.37+ | ||
| 670 | # $ scripts/setlocalversion . => + | ||
| 671 | # $ make kernelversion => 2.6.37 | ||
| 672 | # $ make kernelrelease => 2.6.37+ | ||
| 673 | # See kernel-arch.bbclass for post v6.3 removal of the extra | ||
| 674 | # + in localversion. .scmversion is no longer used, and the | ||
| 675 | # variable LOCALVERSION must be used | ||
| 676 | if [ ! -e ${B}/.scmversion -a ! -e ${S}/.scmversion ]; then | ||
| 677 | echo ${KERNEL_LOCALVERSION} > ${B}/.scmversion | ||
| 678 | echo ${KERNEL_LOCALVERSION} > ${S}/.scmversion | ||
| 679 | fi | ||
| 680 | |||
| 681 | if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then | ||
| 682 | mv "${S}/.config" "${B}/.config" | ||
| 683 | fi | ||
| 684 | |||
| 685 | # Copy defconfig to .config if .config does not exist. This allows | ||
| 686 | # recipes to manage the .config themselves in do_configure:prepend(). | ||
| 687 | if [ -f "${UNPACKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then | ||
| 688 | cp "${UNPACKDIR}/defconfig" "${B}/.config" | ||
| 689 | fi | ||
| 690 | |||
| 691 | ${KERNEL_CONFIG_COMMAND} | ||
| 692 | } | ||
| 693 | |||
| 694 | inherit cml1 pkgconfig | ||
| 695 | |||
| 696 | EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure | ||
| 697 | |||
| 698 | # kernel-base becomes kernel-${KERNEL_VERSION} | ||
| 699 | # kernel-image becomes kernel-image-${KERNEL_VERSION} | ||
| 700 | PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg" | ||
| 701 | FILES:${PN} = "" | ||
| 702 | FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo" | ||
| 703 | FILES:${KERNEL_PACKAGE_NAME}-image = "" | ||
| 704 | FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build" | ||
| 705 | FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}" | ||
| 706 | FILES:${KERNEL_PACKAGE_NAME}-modules = "" | ||
| 707 | FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug" | ||
| 708 | RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})" | ||
| 709 | # Allow machines to override this dependency if kernel image files are | ||
| 710 | # not wanted in images as standard | ||
| 711 | RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})" | ||
| 712 | PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}" | ||
| 713 | RPROVIDES:${KERNEL_PACKAGE_NAME}-image += "${KERNEL_PACKAGE_NAME}-image" | ||
| 714 | RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}" | ||
| 715 | PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}" | ||
| 716 | RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME}-base" | ||
| 717 | ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1" | ||
| 718 | ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1" | ||
| 719 | ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1" | ||
| 720 | ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1" | ||
| 721 | DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package" | ||
| 722 | |||
| 723 | pkg_postinst:${KERNEL_PACKAGE_NAME}-base () { | ||
| 724 | if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then | ||
| 725 | mkdir -p $D/lib/modules/${KERNEL_VERSION} | ||
| 726 | fi | ||
| 727 | if [ -n "$D" ]; then | ||
| 728 | depmodwrapper -a -b $D ${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME} | ||
| 729 | else | ||
| 730 | depmod -a ${KERNEL_VERSION} | ||
| 731 | fi | ||
| 732 | } | ||
| 733 | |||
| 734 | PACKAGESPLITFUNCS =+ "split_kernel_packages" | ||
| 735 | |||
| 736 | python split_kernel_packages () { | ||
| 737 | do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='') | ||
| 738 | } | ||
| 739 | |||
| 740 | # Many scripts want to look in arch/$arch/boot for the bootable | ||
| 741 | # image. This poses a problem for vmlinux- and vmlinuz-based | ||
| 742 | # booting. This task arranges to have vmlinux and vmlinuz appear | ||
| 743 | # in the normalized directory location. | ||
| 744 | do_kernel_link_images() { | ||
| 745 | if [ ! -d "${B}/arch/${ARCH}/boot" ]; then | ||
| 746 | mkdir ${B}/arch/${ARCH}/boot | ||
| 747 | fi | ||
| 748 | cd ${B}/arch/${ARCH}/boot | ||
| 749 | ln -sf ../../../vmlinux | ||
| 750 | if [ -f ../../../vmlinuz ]; then | ||
| 751 | ln -sf ../../../vmlinuz | ||
| 752 | fi | ||
| 753 | if [ -f ../../../vmlinuz.bin ]; then | ||
| 754 | ln -sf ../../../vmlinuz.bin | ||
| 755 | fi | ||
| 756 | if [ -f ../../../vmlinux.64 ]; then | ||
| 757 | ln -sf ../../../vmlinux.64 | ||
| 758 | fi | ||
| 759 | } | ||
| 760 | addtask kernel_link_images after do_compile before do_strip | ||
| 761 | |||
| 762 | python do_strip() { | ||
| 763 | import shutil | ||
| 764 | |||
| 765 | strip = d.getVar('KERNEL_STRIP') | ||
| 766 | extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS') | ||
| 767 | kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux" | ||
| 768 | |||
| 769 | if (extra_sections and kernel_image.find(d.getVar('KERNEL_IMAGEDEST') + '/vmlinux') != -1): | ||
| 770 | kernel_image_stripped = kernel_image + ".stripped" | ||
| 771 | shutil.copy2(kernel_image, kernel_image_stripped) | ||
| 772 | oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections)) | ||
| 773 | bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \ | ||
| 774 | extra_sections) | ||
| 775 | } | ||
| 776 | do_strip[dirs] = "${B}" | ||
| 777 | |||
| 778 | addtask strip before do_sizecheck after do_kernel_link_images | ||
| 779 | |||
| 780 | # Support checking the kernel size since some kernels need to reside in partitions | ||
| 781 | # with a fixed length or there is a limit in transferring the kernel to memory. | ||
| 782 | # If more than one image type is enabled, warn on any that don't fit but only fail | ||
| 783 | # if none fit. | ||
| 784 | do_sizecheck() { | ||
| 785 | if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then | ||
| 786 | invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'` | ||
| 787 | if [ -n "$invalid" ]; then | ||
| 788 | die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)" | ||
| 789 | fi | ||
| 790 | at_least_one_fits= | ||
| 791 | for imageType in ${KERNEL_IMAGETYPES} ; do | ||
| 792 | size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'` | ||
| 793 | if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then | ||
| 794 | bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device." | ||
| 795 | else | ||
| 796 | at_least_one_fits=y | ||
| 797 | fi | ||
| 798 | done | ||
| 799 | if [ -z "$at_least_one_fits" ]; then | ||
| 800 | die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular." | ||
| 801 | fi | ||
| 802 | fi | ||
| 803 | } | ||
| 804 | do_sizecheck[dirs] = "${B}" | ||
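A minimal sketch of enabling the size check above, assuming an 8 MiB kernel partition (the limit is an illustrative assumption; the unit is kilobytes as noted above):

    # machine conf sketch -- the limit is an assumed example (kilobytes)
    KERNEL_IMAGE_MAXSIZE = "8192"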
| 805 | |||
| 806 | addtask sizecheck before do_install after do_strip | ||
| 807 | |||
| 808 | inherit kernel-artifact-names | ||
| 809 | |||
| 810 | kernel_do_deploy() { | ||
| 811 | deployDir="${DEPLOYDIR}" | ||
| 812 | if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then | ||
| 813 | deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}" | ||
| 814 | mkdir "$deployDir" | ||
| 815 | fi | ||
| 816 | |||
| 817 | for imageType in ${KERNEL_IMAGETYPES} ; do | ||
| 818 | baseName=$imageType-${KERNEL_IMAGE_NAME} | ||
| 819 | |||
| 820 | if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then | ||
| 821 | install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT} | ||
| 822 | else | ||
| 823 | install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT} | ||
| 824 | fi | ||
| 825 | if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then | ||
| 826 | ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT} | ||
| 827 | fi | ||
| 828 | if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then | ||
| 829 | ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType | ||
| 830 | fi | ||
| 831 | done | ||
| 832 | |||
| 833 | if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then | ||
| 834 | mkdir -p ${D}${root_prefix}/lib | ||
| 835 | if [ -n "${SOURCE_DATE_EPOCH}" ]; then | ||
| 836 | TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}" | ||
| 837 | else | ||
| 838 | TAR_ARGS="" | ||
| 839 | fi | ||
| 840 | TAR_ARGS="$TAR_ARGS --owner=0 --group=0" | ||
| 841 | tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz | ||
| 842 | |||
| 843 | if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then | ||
| 844 | ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz | ||
| 845 | fi | ||
| 846 | fi | ||
| 847 | |||
| 848 | if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then | ||
| 849 | for imageType in ${KERNEL_IMAGETYPES} ; do | ||
| 850 | initramfsBaseName=$imageType-${INITRAMFS_NAME} | ||
| 851 | install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT} | ||
| 852 | if [ -n "${INITRAMFS_LINK_NAME}" ] ; then | ||
| 853 | ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT} | ||
| 854 | fi | ||
| 855 | done | ||
| 856 | fi | ||
| 857 | } | ||
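The deploy step is shaped by several optional variables referenced above. A hedged configuration sketch (all values are illustrative assumptions):

    # conf sketch -- all values are illustrative assumptions
    KERNEL_DEPLOYSUBDIR = "kernel"     # deploy under ${DEPLOYDIR}/kernel
    KERNEL_IMAGETYPE_SYMLINK = "1"     # also create a bare $imageType symlink
    MODULE_TARBALL_DEPLOY = "0"        # skip the modules-*.tgz tarball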
| 858 | |||
| 859 | # We deploy to filenames that include PKGV and PKGR, so read the saved data to | ||
| 860 | # ensure we get the right values for both | ||
| 861 | do_deploy[prefuncs] += "read_subpackage_metadata" | ||
| 862 | |||
| 863 | addtask deploy after do_populate_sysroot do_packagedata | ||
| 864 | |||
| 865 | EXPORT_FUNCTIONS do_deploy | ||
| 866 | |||
| 867 | # Add Device Tree support | ||
| 868 | inherit kernel-devicetree | ||
diff --git a/meta/classes-recipe/kernelsrc.bbclass b/meta/classes-recipe/kernelsrc.bbclass deleted file mode 100644 index 9336184298..0000000000 --- a/meta/classes-recipe/kernelsrc.bbclass +++ /dev/null | |||
| @@ -1,21 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | S = "${STAGING_KERNEL_DIR}" | ||
| 8 | deltask do_fetch | ||
| 9 | deltask do_unpack | ||
| 10 | do_patch[depends] += "virtual/kernel:do_shared_workdir" | ||
| 11 | do_patch[noexec] = "1" | ||
| 12 | do_package[depends] += "virtual/kernel:do_populate_sysroot" | ||
| 13 | KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}" | ||
| 14 | LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}" | ||
| 15 | |||
| 16 | inherit linux-kernel-base | ||
| 17 | |||
| 18 | # The final packages get the kernel version instead of the default 1.0 | ||
| 19 | python do_package:prepend() { | ||
| 20 | d.setVar('PKGV', d.getVar("KERNEL_VERSION").split("-")[0]) | ||
| 21 | } | ||
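For reference, a recipe building against the shared kernel source tree would typically just inherit this class; S, the fetch/unpack removal, and the PKGV fixup all come from the lines above. A hypothetical sketch (recipe contents and paths are assumptions; perf in OE-Core is a real user of this class):

    # hypothetical recipe sketch -- names and paths are assumptions
    SUMMARY = "Tool built from the shared kernel source"
    inherit kernelsrc
    do_compile() {
        oe_runmake -C ${S}/tools/mytool
    }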
diff --git a/meta/classes-recipe/lib_package.bbclass b/meta/classes-recipe/lib_package.bbclass deleted file mode 100644 index 6d110155e5..0000000000 --- a/meta/classes-recipe/lib_package.bbclass +++ /dev/null | |||
| @@ -1,12 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | # | ||
| 7 | # ${PN}-bin is defined in bitbake.conf | ||
| 8 | # | ||
| 9 | # We need to allow the other packages to be greedy with what they | ||
| 10 | # want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy. | ||
| 11 | # | ||
| 12 | PACKAGE_BEFORE_PN = "${PN}-bin" | ||
diff --git a/meta/classes-recipe/libc-package.bbclass b/meta/classes-recipe/libc-package.bbclass deleted file mode 100644 index c06a2ce90a..0000000000 --- a/meta/classes-recipe/libc-package.bbclass +++ /dev/null | |||
| @@ -1,392 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains | ||
| 9 | # may need packaging and it's pointless to duplicate this code. | ||
| 10 | # | ||
| 11 | # Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of: | ||
| 12 | # "compile" - Use QEMU to generate the binary locale files | ||
| 13 | # "precompiled" - The binary locale files are pregenerated and already present | ||
| 14 | # "ondevice" - The device will build the locale files upon first boot through the postinst | ||
| 15 | |||
| 16 | GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice" | ||
| 17 | |||
| 18 | GLIBC_SPLIT_LC_PACKAGES ?= "0" | ||
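A hedged sketch of driving binary locale generation from a distro configuration (the locale list is an assumed example; the variables are the ones read by this class):

    # distro conf sketch -- the locale list is an assumed example
    ENABLE_BINARY_LOCALE_GENERATION = "1"
    GLIBC_GENERATE_LOCALES = "en_US.UTF-8 de_DE.UTF-8"
    GLIBC_SPLIT_LC_PACKAGES = "1"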
| 19 | |||
| 20 | python __anonymous () { | ||
| 21 | enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION") | ||
| 22 | |||
| 23 | pn = d.getVar("PN") | ||
| 24 | if pn.endswith("-initial"): | ||
| 25 | enabled = False | ||
| 26 | |||
| 27 | if enabled and int(enabled): | ||
| 28 | import re | ||
| 29 | |||
| 30 | target_arch = d.getVar("TARGET_ARCH") | ||
| 31 | binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or "" | ||
| 32 | use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "" | ||
| 33 | |||
| 34 | for regexp in binary_arches.split(" "): | ||
| 35 | r = re.compile(regexp) | ||
| 36 | |||
| 37 | if r.match(target_arch): | ||
| 38 | depends = d.getVar("DEPENDS") | ||
| 39 | if use_cross_localedef == "1" : | ||
| 40 | depends = "%s cross-localedef-native" % depends | ||
| 41 | else: | ||
| 42 | depends = "%s qemu-native" % depends | ||
| 43 | d.setVar("DEPENDS", depends) | ||
| 44 | d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile") | ||
| 45 | break | ||
| 46 | } | ||
| 47 | |||
| 48 | # Try to fix compile failures when charsets/locales/locale-code are disabled | ||
| 49 | PACKAGE_NO_GCONV ?= "0" | ||
| 50 | |||
| 51 | OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}" | ||
| 52 | |||
| 53 | locale_base_postinst_ontarget() { | ||
| 54 | mkdir ${libdir}/locale | ||
| 55 | localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s | ||
| 56 | } | ||
| 57 | |||
| 58 | locale_base_postrm() { | ||
| 59 | #!/bin/sh | ||
| 60 | localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s | ||
| 61 | } | ||
| 62 | |||
| 63 | LOCALETREESRC ?= "${PKGD}" | ||
| 64 | |||
| 65 | do_prep_locale_tree() { | ||
| 66 | treedir=${WORKDIR}/locale-tree | ||
| 67 | rm -rf $treedir | ||
| 68 | mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir} | ||
| 69 | tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir} | ||
| 70 | # gunzip the charmaps to avoid parsing errors | ||
| 71 | for i in $treedir/${datadir}/i18n/charmaps/*gz; do | ||
| 72 | gunzip $i | ||
| 73 | done | ||
| 74 | # The extract pattern "./l*.so*" is carefully selected so that it will | ||
| 75 | # match ld*.so and lib*.so*, but not any files in the gconv directory | ||
| 76 | # (if it exists). This makes sure we only unpack the files we need. | ||
| 77 | # This is important in case usrmerge is set in DISTRO_FEATURES, which | ||
| 78 | # means ${base_libdir} == ${libdir}. | ||
| 79 | tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*' | ||
| 80 | if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then | ||
| 81 | tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir} | ||
| 82 | fi | ||
| 83 | install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir} | ||
| 84 | } | ||
| 85 | |||
| 86 | do_collect_bins_from_locale_tree() { | ||
| 87 | treedir=${WORKDIR}/locale-tree | ||
| 88 | |||
| 89 | parent=$(dirname ${localedir}) | ||
| 90 | mkdir -p ${PKGD}/$parent | ||
| 91 | tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent | ||
| 92 | |||
| 93 | # Finalize the tree by changing all duplicate files into hard links | ||
| 94 | cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree | ||
| 95 | } | ||
| 96 | |||
| 97 | inherit qemu | ||
| 98 | |||
| 99 | python package_do_split_gconvs () { | ||
| 100 | import re | ||
| 101 | if (d.getVar('PACKAGE_NO_GCONV') == '1'): | ||
| 102 | bb.note("package requested not splitting gconvs") | ||
| 103 | return | ||
| 104 | |||
| 105 | if not d.getVar('PACKAGES'): | ||
| 106 | return | ||
| 107 | |||
| 108 | mlprefix = d.getVar("MLPREFIX") or "" | ||
| 109 | |||
| 110 | bpn = d.getVar('BPN') | ||
| 111 | libdir = d.getVar('libdir') | ||
| 112 | if not libdir: | ||
| 113 | bb.error("libdir not defined") | ||
| 114 | return | ||
| 115 | datadir = d.getVar('datadir') | ||
| 116 | if not datadir: | ||
| 117 | bb.error("datadir not defined") | ||
| 118 | return | ||
| 119 | |||
| 120 | gconv_libdir = oe.path.join(libdir, "gconv") | ||
| 121 | charmap_dir = oe.path.join(datadir, "i18n", "charmaps") | ||
| 122 | locales_dir = oe.path.join(datadir, "i18n", "locales") | ||
| 123 | binary_locales_dir = d.getVar('localedir') | ||
| 124 | |||
| 125 | def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group): | ||
| 126 | deps = [] | ||
| 127 | f = open(fn, "rb") | ||
| 128 | c_re = re.compile(r'^copy "(.*)"') | ||
| 129 | i_re = re.compile(r'^include "(\w+)".*') | ||
| 130 | for l in f.readlines(): | ||
| 131 | l = l.decode("latin-1") | ||
| 132 | m = c_re.match(l) or i_re.match(l) | ||
| 133 | if m: | ||
| 134 | dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1))) | ||
| 135 | if not dp in deps: | ||
| 136 | deps.append(dp) | ||
| 137 | f.close() | ||
| 138 | if deps != []: | ||
| 139 | d.setVar('RDEPENDS:%s' % pkg, " ".join(deps)) | ||
| 140 | if bpn != 'glibc': | ||
| 141 | d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc')) | ||
| 142 | |||
| 143 | do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \ | ||
| 144 | description='gconv module for character set %s', hook=calc_gconv_deps, \ | ||
| 145 | extra_depends=bpn+'-gconv') | ||
| 146 | |||
| 147 | def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group): | ||
| 148 | deps = [] | ||
| 149 | f = open(fn, "rb") | ||
| 150 | c_re = re.compile(r'^copy "(.*)"') | ||
| 151 | i_re = re.compile(r'^include "(\w+)".*') | ||
| 152 | for l in f.readlines(): | ||
| 153 | l = l.decode("latin-1") | ||
| 154 | m = c_re.match(l) or i_re.match(l) | ||
| 155 | if m: | ||
| 156 | dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1))) | ||
| 157 | if not dp in deps: | ||
| 158 | deps.append(dp) | ||
| 159 | f.close() | ||
| 160 | if deps != []: | ||
| 161 | d.setVar('RDEPENDS:%s' % pkg, " ".join(deps)) | ||
| 162 | if bpn != 'glibc': | ||
| 163 | d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc')) | ||
| 164 | |||
| 165 | do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \ | ||
| 166 | description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='') | ||
| 167 | |||
| 168 | def calc_locale_deps(fn, pkg, file_regex, output_pattern, group): | ||
| 169 | deps = [] | ||
| 170 | f = open(fn, "rb") | ||
| 171 | c_re = re.compile(r'^copy "(.*)"') | ||
| 172 | i_re = re.compile(r'^include "(\w+)".*') | ||
| 173 | for l in f.readlines(): | ||
| 174 | l = l.decode("latin-1") | ||
| 175 | m = c_re.match(l) or i_re.match(l) | ||
| 176 | if m: | ||
| 177 | dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1)) | ||
| 178 | if not dp in deps: | ||
| 179 | deps.append(dp) | ||
| 180 | f.close() | ||
| 181 | if deps != []: | ||
| 182 | d.setVar('RDEPENDS:%s' % pkg, " ".join(deps)) | ||
| 183 | if bpn != 'glibc': | ||
| 184 | d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc')) | ||
| 185 | |||
| 186 | do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \ | ||
| 187 | description='locale definition for %s', hook=calc_locale_deps, extra_depends='') | ||
| 188 | d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv') | ||
| 189 | |||
| 190 | use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE") | ||
| 191 | |||
| 192 | dot_re = re.compile(r"(.*)\.(.*)") | ||
| 193 | |||
| 194 | # Read in supported locales and associated encodings | ||
| 195 | supported = {} | ||
| 196 | with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f: | ||
| 197 | for line in f.readlines(): | ||
| 198 | try: | ||
| 199 | locale, charset = line.rstrip().split() | ||
| 200 | except ValueError: | ||
| 201 | continue | ||
| 202 | supported[locale] = charset | ||
| 203 | |||
| 204 | # The GLIBC_GENERATE_LOCALES variable specifies which locales to generate. Empty or "all" means all locales | ||
| 205 | to_generate = d.getVar('GLIBC_GENERATE_LOCALES') | ||
| 206 | if not to_generate or to_generate == 'all': | ||
| 207 | to_generate = sorted(supported.keys()) | ||
| 208 | else: | ||
| 209 | to_generate = to_generate.split() | ||
| 210 | for locale in to_generate: | ||
| 211 | if locale not in supported: | ||
| 212 | if '.' in locale: | ||
| 213 | charset = locale.split('.')[1] | ||
| 214 | else: | ||
| 215 | charset = 'UTF-8' | ||
| 216 | bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset)) | ||
| 217 | supported[locale] = charset | ||
| 218 | |||
| 219 | def output_locale_source(name, pkgname, locale, encoding): | ||
| 220 | d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \ | ||
| 221 | (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding))) | ||
| 222 | d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \ | ||
| 223 | % (locale, encoding, locale)) | ||
| 224 | d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \ | ||
| 225 | (locale, encoding, locale)) | ||
| 226 | |||
| 227 | def output_locale_binary_rdepends(name, pkgname, locale, encoding): | ||
| 228 | dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name)) | ||
| 229 | lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES') | ||
| 230 | if lcsplit and int(lcsplit): | ||
| 231 | d.appendVar('PACKAGES', ' ' + dep) | ||
| 232 | d.setVar('ALLOW_EMPTY:%s' % dep, '1') | ||
| 233 | d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep) | ||
| 234 | |||
| 235 | commands = {} | ||
| 236 | |||
| 237 | def output_locale_binary(name, pkgname, locale, encoding): | ||
| 238 | treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree") | ||
| 239 | ldlibdir = oe.path.join(treedir, d.getVar("base_libdir")) | ||
| 240 | path = d.getVar("PATH") | ||
| 241 | i18npath = oe.path.join(treedir, datadir, "i18n") | ||
| 242 | gconvpath = oe.path.join(treedir, "iconvdata") | ||
| 243 | outputpath = oe.path.join(treedir, binary_locales_dir) | ||
| 244 | |||
| 245 | use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0" | ||
| 246 | if use_cross_localedef == "1": | ||
| 247 | target_arch = d.getVar('TARGET_ARCH') | ||
| 248 | locale_arch_options = { \ | ||
| 249 | "arc": " --uint32-align=4 --little-endian ", \ | ||
| 250 | "arceb": " --uint32-align=4 --big-endian ", \ | ||
| 251 | "arm": " --uint32-align=4 --little-endian ", \ | ||
| 252 | "armeb": " --uint32-align=4 --big-endian ", \ | ||
| 253 | "aarch64": " --uint32-align=4 --little-endian ", \ | ||
| 254 | "aarch64_be": " --uint32-align=4 --big-endian ", \ | ||
| 255 | "sh4": " --uint32-align=4 --big-endian ", \ | ||
| 256 | "powerpc": " --uint32-align=4 --big-endian ", \ | ||
| 257 | "powerpc64": " --uint32-align=4 --big-endian ", \ | ||
| 258 | "powerpc64le": " --uint32-align=4 --little-endian ", \ | ||
| 259 | "mips": " --uint32-align=4 --big-endian ", \ | ||
| 260 | "mipsisa32r6": " --uint32-align=4 --big-endian ", \ | ||
| 261 | "mips64": " --uint32-align=4 --big-endian ", \ | ||
| 262 | "mipsisa64r6": " --uint32-align=4 --big-endian ", \ | ||
| 263 | "mipsel": " --uint32-align=4 --little-endian ", \ | ||
| 264 | "mipsisa32r6el": " --uint32-align=4 --little-endian ", \ | ||
| 265 | "mips64el":" --uint32-align=4 --little-endian ", \ | ||
| 266 | "mipsisa64r6el":" --uint32-align=4 --little-endian ", \ | ||
| 267 | "riscv64": " --uint32-align=4 --little-endian ", \ | ||
| 268 | "riscv32": " --uint32-align=4 --little-endian ", \ | ||
| 269 | "i586": " --uint32-align=4 --little-endian ", \ | ||
| 270 | "i686": " --uint32-align=4 --little-endian ", \ | ||
| 271 | "x86_64": " --uint32-align=4 --little-endian ", \ | ||
| 272 | "loongarch64": " --uint32-align=4 --little-endian " } | ||
| 273 | |||
| 274 | if target_arch in locale_arch_options: | ||
| 275 | localedef_opts = locale_arch_options[target_arch] | ||
| 276 | else: | ||
| 277 | bb.error("locale_arch_options not found for target_arch=" + target_arch) | ||
| 278 | bb.fatal("unknown arch:" + target_arch + " for locale_arch_options") | ||
| 279 | |||
| 280 | localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \ | ||
| 281 | --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s --no-warnings=ascii" \ | ||
| 282 | % (treedir, treedir, datadir, locale, encoding, outputpath, name) | ||
| 283 | |||
| 284 | cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \ | ||
| 285 | (path, i18npath, gconvpath, localedef_opts) | ||
| 286 | else: # the earlier, slower qemu way | ||
| 287 | qemu = qemu_target_binary(d) | ||
| 288 | localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \ | ||
| 289 | --inputfile=%s/i18n/locales/%s --charmap=%s %s" \ | ||
| 290 | % (treedir, datadir, locale, encoding, name) | ||
| 291 | |||
| 292 | qemu_options = d.getVar('QEMU_OPTIONS') | ||
| 293 | |||
| 294 | cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \ | ||
| 295 | -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \ | ||
| 296 | (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts) | ||
| 297 | |||
| 298 | commands["%s/%s" % (outputpath, name)] = cmd | ||
| 299 | |||
| 300 | bb.note("generating locale %s (%s)" % (locale, encoding)) | ||
| 301 | |||
| 302 | def output_locale(name, locale, encoding): | ||
| 303 | pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name) | ||
| 304 | d.setVar('ALLOW_EMPTY:%s' % pkgname, '1') | ||
| 305 | d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES'))) | ||
| 306 | rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name)) | ||
| 307 | m = re.match(r"(.*)_(.*)", name) | ||
| 308 | if m: | ||
| 309 | rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1)) | ||
| 310 | d.setVar('RPROVIDES:%s' % pkgname, rprovides) | ||
| 311 | |||
| 312 | if use_bin == "compile": | ||
| 313 | output_locale_binary_rdepends(name, pkgname, locale, encoding) | ||
| 314 | output_locale_binary(name, pkgname, locale, encoding) | ||
| 315 | elif use_bin == "precompiled": | ||
| 316 | output_locale_binary_rdepends(name, pkgname, locale, encoding) | ||
| 317 | else: | ||
| 318 | output_locale_source(name, pkgname, locale, encoding) | ||
| 319 | |||
| 320 | if use_bin == "compile": | ||
| 321 | bb.note("preparing tree for binary locale generation") | ||
| 322 | bb.build.exec_func("do_prep_locale_tree", d) | ||
| 323 | |||
| 324 | utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0) | ||
| 325 | utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0) | ||
| 326 | |||
| 327 | encodings = {} | ||
| 328 | for locale in to_generate: | ||
| 329 | charset = supported[locale] | ||
| 330 | if utf8_only and charset != 'UTF-8': | ||
| 331 | continue | ||
| 332 | |||
| 333 | m = dot_re.match(locale) | ||
| 334 | if m: | ||
| 335 | base = m.group(1) | ||
| 336 | else: | ||
| 337 | base = locale | ||
| 338 | |||
| 339 | # Non-precompiled locales may be renamed so that the default | ||
| 340 | # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and | ||
| 341 | # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly | ||
| 342 | # contradicts SUPPORTED. | ||
| 343 | if use_bin == "precompiled" or not utf8_is_default: | ||
| 344 | output_locale(locale, base, charset) | ||
| 345 | else: | ||
| 346 | if charset == 'UTF-8': | ||
| 347 | output_locale(base, base, charset) | ||
| 348 | else: | ||
| 349 | output_locale('%s.%s' % (base, charset), base, charset) | ||
| 350 | |||
| 351 | def metapkg_hook(file, pkg, pattern, format, basename): | ||
| 352 | name = basename.split('/', 1)[0] | ||
| 353 | metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name)) | ||
| 354 | d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg) | ||
| 355 | |||
| 356 | if use_bin == "compile": | ||
| 357 | makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile") | ||
| 358 | with open(makefile, "w") as m: | ||
| 359 | m.write("all: %s\n\n" % " ".join(commands.keys())) | ||
| 360 | total = len(commands) | ||
| 361 | for i, (maketarget, makerecipe) in enumerate(commands.items()): | ||
| 362 | m.write(maketarget + ":\n") | ||
| 363 | m.write("\t@echo 'Progress %d/%d'\n" % (i, total)) | ||
| 364 | m.write("\t" + makerecipe + "\n\n") | ||
| 365 | d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile))) | ||
| 366 | d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)") | ||
| 367 | bb.note("Executing binary locale generation makefile") | ||
| 368 | bb.build.exec_func("oe_runmake", d) | ||
| 369 | bb.note("collecting binary locales from locale tree") | ||
| 370 | bb.build.exec_func("do_collect_bins_from_locale_tree", d) | ||
| 371 | |||
| 372 | if use_bin in ('compile', 'precompiled'): | ||
| 373 | lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES') | ||
| 374 | if lcsplit and int(lcsplit): | ||
| 375 | do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \ | ||
| 376 | output_pattern=bpn+'-binary-localedata-%s', \ | ||
| 377 | description='binary locale definition for %s', recursive=True, | ||
| 378 | hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True) | ||
| 379 | else: | ||
| 380 | do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \ | ||
| 381 | output_pattern=bpn+'-binary-localedata-%s', \ | ||
| 382 | description='binary locale definition for %s', extra_depends='', allow_dirs=True) | ||
| 383 | else: | ||
| 384 | bb.note("generation of binary locales disabled. this may break i18n!") | ||
| 385 | |||
| 386 | } | ||
| 387 | |||
| 388 | # We want to do this indirection so that we can safely 'return' | ||
| 389 | # from the called function even though we're prepending | ||
| 390 | python populate_packages:prepend () { | ||
| 391 | bb.build.exec_func('package_do_split_gconvs', d) | ||
| 392 | } | ||
diff --git a/meta/classes-recipe/license_image.bbclass b/meta/classes-recipe/license_image.bbclass deleted file mode 100644 index d2c5ab902c..0000000000 --- a/meta/classes-recipe/license_image.bbclass +++ /dev/null | |||
| @@ -1,317 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses" | ||
| 8 | |||
| 9 | # This requires LICENSE_CREATE_PACKAGE=1 to work too | ||
| 10 | COMPLEMENTARY_GLOB[lic-pkgs] = "*-lic" | ||
| 11 | |||
| 12 | python() { | ||
| 13 | if not oe.data.typed_value('LICENSE_CREATE_PACKAGE', d): | ||
| 14 | features = set(oe.data.typed_value('IMAGE_FEATURES', d)) | ||
| 15 | if 'lic-pkgs' in features: | ||
| 16 | bb.error("'lic-pkgs' in IMAGE_FEATURES but LICENSE_CREATE_PACKAGE not enabled to generate -lic packages") | ||
| 17 | } | ||
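To actually use the lic-pkgs feature checked above, both pieces must be enabled together, roughly (a sketch; local.conf is one valid placement, not the only one):

    # local.conf sketch
    LICENSE_CREATE_PACKAGE = "1"
    IMAGE_FEATURES:append = " lic-pkgs"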
| 18 | |||
| 19 | python write_package_manifest() { | ||
| 20 | # Get list of installed packages | ||
| 21 | license_image_dir = d.expand('${LICENSE_DIRECTORY}/${SSTATE_PKGARCH}/${IMAGE_NAME}') | ||
| 22 | bb.utils.mkdirhier(license_image_dir) | ||
| 23 | from oe.rootfs import image_list_installed_packages | ||
| 24 | from oe.utils import format_pkg_list | ||
| 25 | |||
| 26 | pkgs = image_list_installed_packages(d) | ||
| 27 | output = format_pkg_list(pkgs) | ||
| 28 | with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest: | ||
| 29 | package_manifest.write(output) | ||
| 30 | } | ||
| 31 | |||
| 32 | python license_create_manifest() { | ||
| 33 | import oe.packagedata | ||
| 34 | from oe.rootfs import image_list_installed_packages | ||
| 35 | |||
| 36 | build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS') | ||
| 37 | if build_images_from_feeds == "1": | ||
| 38 | return 0 | ||
| 39 | |||
| 40 | pkg_dic = {} | ||
| 41 | for pkg in sorted(image_list_installed_packages(d)): | ||
| 42 | pkg_info = os.path.join(d.getVar('PKGDATA_DIR'), | ||
| 43 | 'runtime-reverse', pkg) | ||
| 44 | pkg_name = os.path.basename(os.readlink(pkg_info)) | ||
| 45 | |||
| 46 | pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info) | ||
| 47 | if not "LICENSE" in pkg_dic[pkg_name].keys(): | ||
| 48 | pkg_lic_name = "LICENSE:" + pkg_name | ||
| 49 | pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name] | ||
| 50 | |||
| 51 | rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'), | ||
| 52 | d.getVar('SSTATE_PKGARCH'), d.getVar('IMAGE_NAME'), 'license.manifest') | ||
| 53 | write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True) | ||
| 54 | } | ||
| 55 | |||
| 56 | def write_license_files(d, license_manifest, pkg_dic, rootfs=True): | ||
| 57 | import re | ||
| 58 | import stat | ||
| 59 | |||
| 60 | bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split() | ||
| 61 | bad_licenses = oe.license.expand_wildcard_licenses(d, bad_licenses) | ||
| 62 | pkgarchs = d.getVar("SSTATE_ARCHS").split() | ||
| 63 | pkgarchs.reverse() | ||
| 64 | |||
| 65 | exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split() | ||
| 66 | with open(license_manifest, "w") as license_file: | ||
| 67 | for pkg in sorted(pkg_dic): | ||
| 68 | remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions) | ||
| 69 | incompatible_licenses = oe.license.incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"]) | ||
| 70 | if incompatible_licenses: | ||
| 71 | bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses))) | ||
| 72 | else: | ||
| 73 | incompatible_licenses = oe.license.incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"]) | ||
| 74 | if incompatible_licenses: | ||
| 75 | oe.qa.handle_error('license-exception', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d) | ||
| 76 | try: | ||
| 77 | (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \ | ||
| 78 | oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"], | ||
| 79 | remaining_bad_licenses, oe.license.canonical_license, d) | ||
| 80 | except oe.license.LicenseError as exc: | ||
| 81 | bb.fatal('%s: %s' % (d.getVar('P'), exc)) | ||
| 82 | |||
| 83 | if not "IMAGE_MANIFEST" in pkg_dic[pkg]: | ||
| 84 | # Rootfs manifest | ||
| 85 | license_file.write("PACKAGE NAME: %s\n" % pkg) | ||
| 86 | license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"]) | ||
| 87 | license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"]) | ||
| 88 | license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"]) | ||
| 89 | |||
| 90 | # If the package doesn't contain any files, that is, its size is 0, the license | ||
| 91 | # isn't relevant as far as the final image is concerned, so doing a license | ||
| 92 | # check doesn't make much sense; skip it. | ||
| 93 | if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0": | ||
| 94 | continue | ||
| 95 | else: | ||
| 96 | # Image manifest | ||
| 97 | license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"]) | ||
| 98 | license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"]) | ||
| 99 | license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"]) | ||
| 100 | license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"]) | ||
| 101 | |||
| 102 | for lic in pkg_dic[pkg]["LICENSES"]: | ||
| 103 | for pkgarch in pkgarchs: | ||
| 104 | lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'), | ||
| 105 | pkgarch, | ||
| 106 | pkg_dic[pkg]["PN"], "generic_%s" % | ||
| 107 | re.sub(r'\+', '', lic)) | ||
| 108 | if os.path.exists(lic_file): | ||
| 109 | break | ||
| 110 | # Explicitly skip the CLOSED license because it isn't generic | ||
| 111 | if lic == "CLOSED": | ||
| 112 | continue | ||
| 113 | |||
| 114 | if not os.path.exists(lic_file): | ||
| 115 | oe.qa.handle_error('license-file-missing', | ||
| 116 | "The license listed %s was not in the "\ | ||
| 117 | "licenses collected for recipe %s" | ||
| 118 | % (lic, pkg_dic[pkg]["PN"]), d) | ||
| 119 | oe.qa.exit_if_errors(d) | ||
| 120 | |||
| 121 | # Two options here: | ||
| 122 | # - Just copy the manifest | ||
| 123 | # - Copy the manifest and the license directories | ||
| 124 | # With both options set we see a 0.5 MB increase in core-image-minimal | ||
| 125 | copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST') | ||
| 126 | copy_lic_dirs = d.getVar('COPY_LIC_DIRS') | ||
| 127 | if rootfs and copy_lic_manifest == "1": | ||
| 128 | rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR') | ||
| 129 | bb.utils.mkdirhier(rootfs_license_dir) | ||
| 130 | rootfs_license_manifest = os.path.join(rootfs_license_dir, | ||
| 131 | os.path.split(license_manifest)[1]) | ||
| 132 | if not os.path.exists(rootfs_license_manifest): | ||
| 133 | oe.path.copyhardlink(license_manifest, rootfs_license_manifest) | ||
| 134 | |||
| 135 | if copy_lic_dirs == "1": | ||
| 136 | for pkg in sorted(pkg_dic): | ||
| 137 | pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg) | ||
| 138 | bb.utils.mkdirhier(pkg_rootfs_license_dir) | ||
| 139 | for pkgarch in pkgarchs: | ||
| 140 | pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), | ||
| 141 | pkgarch, pkg_dic[pkg]["PN"]) | ||
| 142 | if os.path.exists(pkg_license_dir): | ||
| 143 | break | ||
| 144 | if not os.path.exists(pkg_license_dir ): | ||
| 145 | bb.fatal("Couldn't find license information for dependency %s" % pkg) | ||
| 146 | |||
| 147 | pkg_manifest_licenses = [oe.license.canonical_license(d, lic) \ | ||
| 148 | for lic in pkg_dic[pkg]["LICENSES"]] | ||
| 149 | |||
| 150 | licenses = os.listdir(pkg_license_dir) | ||
| 151 | for lic in licenses: | ||
| 152 | pkg_license = os.path.join(pkg_license_dir, lic) | ||
| 153 | pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic) | ||
| 154 | |||
| 155 | if re.match(r"^generic_.*$", lic): | ||
| 156 | generic_lic = oe.license.canonical_license(d, | ||
| 157 | re.search(r"^generic_(.*)$", lic).group(1)) | ||
| 158 | |||
| 159 | # Do not copy the generic license into the package if it isn't | ||
| 160 | # declared in the package's LICENSES. | ||
| 161 | if not re.sub(r'\+$', '', generic_lic) in \ | ||
| 162 | [re.sub(r'\+', '', lic) for lic in \ | ||
| 163 | pkg_manifest_licenses]: | ||
| 164 | continue | ||
| 165 | |||
| 166 | if oe.license.license_ok(generic_lic, | ||
| 167 | bad_licenses) == False: | ||
| 168 | continue | ||
| 169 | |||
| 170 | # Make sure we use only canonical name for the license file | ||
| 171 | generic_lic_file = "generic_%s" % generic_lic | ||
| 172 | rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file) | ||
| 173 | if not os.path.exists(rootfs_license): | ||
| 174 | oe.path.copyhardlink(pkg_license, rootfs_license) | ||
| 175 | |||
| 176 | if not os.path.exists(pkg_rootfs_license): | ||
| 177 | os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license) | ||
| 178 | else: | ||
| 179 | if (oe.license.license_ok(oe.license.canonical_license(d, | ||
| 180 | lic), bad_licenses) == False or | ||
| 181 | os.path.exists(pkg_rootfs_license)): | ||
| 182 | continue | ||
| 183 | |||
| 184 | oe.path.copyhardlink(pkg_license, pkg_rootfs_license) | ||
| 185 | # Fixup file ownership and permissions | ||
| 186 | for walkroot, dirs, files in os.walk(rootfs_license_dir): | ||
| 187 | for f in files: | ||
| 188 | p = os.path.join(walkroot, f) | ||
| 189 | os.lchown(p, 0, 0) | ||
| 190 | if not os.path.islink(p): | ||
| 191 | os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) | ||
| 192 | for dir in dirs: | ||
| 193 | p = os.path.join(walkroot, dir) | ||
| 194 | os.lchown(p, 0, 0) | ||
| 195 | os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) | ||
| 196 | |||
| 197 | write_license_files[vardepsexclude] = "SSTATE_ARCHS" | ||
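As the comment in write_license_files notes, copying manifests and license texts into the rootfs is opt-in, with a size cost of roughly 0.5 MB in core-image-minimal when both are enabled. A sketch of turning both on:

    # local.conf sketch
    COPY_LIC_MANIFEST = "1"
    COPY_LIC_DIRS = "1"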
| 198 | |||
| 199 | def license_deployed_manifest(d): | ||
| 200 | """ | ||
| 201 | Write the license manifest for the deployed recipes. | ||
| 202 | The deployed recipes usually include the bootloader | ||
| 203 | and extra files needed to boot the target. | ||
| 204 | """ | ||
| 205 | |||
| 206 | dep_dic = {} | ||
| 207 | man_dic = {} | ||
| 208 | lic_dir = d.getVar("LICENSE_DIRECTORY") | ||
| 209 | pkgarchs = d.getVar("SSTATE_ARCHS").split() | ||
| 210 | pkgarchs.reverse() | ||
| 211 | |||
| 212 | dep_dic = get_deployed_dependencies(d) | ||
| 213 | for dep in dep_dic.keys(): | ||
| 214 | man_dic[dep] = {} | ||
| 215 | # It is necessary to mark that this entry will be used for the image manifest | ||
| 216 | man_dic[dep]["IMAGE_MANIFEST"] = True | ||
| 217 | man_dic[dep]["PN"] = dep | ||
| 218 | man_dic[dep]["FILES"] = \ | ||
| 219 | " ".join(get_deployed_files(dep_dic[dep])) | ||
| 220 | |||
| 221 | for pkgarch in pkgarchs: | ||
| 222 | licfile = os.path.join(lic_dir, pkgarch, dep, "recipeinfo") | ||
| 223 | if os.path.exists(licfile): | ||
| 224 | break | ||
| 225 | if not os.path.exists(licfile): | ||
| 226 | bb.fatal("Couldn't find license information for dependency %s" % dep) | ||
| 227 | with open(licfile, "r") as f: | ||
| 228 | for line in f: | ||
| 229 | key, val = line.split(": ", 1) | ||
| 230 | man_dic[dep][key] = val[:-1] | ||
| 231 | |||
| 232 | lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), d.getVar('SSTATE_PKGARCH'), | ||
| 233 | d.getVar('IMAGE_NAME')) | ||
| 234 | bb.utils.mkdirhier(lic_manifest_dir) | ||
| 235 | image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest') | ||
| 236 | write_license_files(d, image_license_manifest, man_dic, rootfs=False) | ||
| 237 | |||
| 238 | link_name = d.getVar('IMAGE_LINK_NAME') | ||
| 239 | if link_name: | ||
| 240 | lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), d.getVar('SSTATE_PKGARCH'), | ||
| 241 | link_name) | ||
| 242 | # remove old symlink | ||
| 243 | if os.path.islink(lic_manifest_symlink_dir): | ||
| 244 | os.unlink(lic_manifest_symlink_dir) | ||
| 245 | |||
| 246 | # create the image dir symlink | ||
| 247 | if lic_manifest_dir != lic_manifest_symlink_dir: | ||
| 248 | os.symlink(lic_manifest_dir, lic_manifest_symlink_dir) | ||
| 249 | |||
| 250 | license_deployed_manifest[vardepsexclude] = "SSTATE_ARCHS" | ||
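The `recipeinfo` files read in the loop above are plain "key: value" text; a hedged sketch of that parsing, pulled out of the datastore context (the key names themselves come from whatever license.bbclass wrote into recipeinfo):

```python
def parse_recipeinfo(path):
    # Each line is "KEY: VALUE"; drop the trailing newline, mirroring
    # the val[:-1] slice used above.
    info = {}
    with open(path) as f:
        for line in f:
            key, val = line.split(": ", 1)
            info[key] = val.rstrip("\n")
    return info
```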
| 251 | |||
| 252 | def get_deployed_dependencies(d): | ||
| 253 | """ | ||
| 254 | Get all the deployed dependencies of an image | ||
| 255 | """ | ||
| 256 | |||
| 257 | deploy = {} | ||
| 258 | # Get all the dependencies for the current task (rootfs). | ||
| 259 | taskdata = d.getVar("BB_TASKDEPDATA", False) | ||
| 260 | pn = d.getVar("PN") | ||
| 261 | depends = list(set([dep[0] for dep | ||
| 262 | in list(taskdata.values()) | ||
| 263 | if not dep[0].endswith("-native") and not dep[0] == pn])) | ||
| 264 | |||
| 265 | # To verify what was deployed, check the rootfs dependencies against | ||
| 266 | # the SSTATE_MANIFESTS for the "deploy" task. | ||
| 267 | # The manifest file name contains the arch. Because we are not running | ||
| 268 | # in the recipe context, it is necessary to check every arch used. | ||
| 269 | sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS") | ||
| 270 | archs = list(set(d.getVar("SSTATE_ARCHS").split())) | ||
| 271 | for dep in depends: | ||
| 272 | for arch in archs: | ||
| 273 | sstate_manifest_file = os.path.join(sstate_manifest_dir, | ||
| 274 | "manifest-%s-%s.deploy" % (arch, dep)) | ||
| 275 | if os.path.exists(sstate_manifest_file): | ||
| 276 | deploy[dep] = sstate_manifest_file | ||
| 277 | break | ||
| 278 | |||
| 279 | return deploy | ||
| 280 | get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA SSTATE_ARCHS" | ||
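Since the rootfs task cannot know which sstate package arch each dependency was built under, the lookup above simply probes every candidate manifest name and takes the first hit. A standalone sketch of the probing (paths are illustrative):

```python
import os

def find_deploy_manifest(manifest_dir, archs, recipe):
    # Try "manifest-<arch>-<recipe>.deploy" for every known arch and
    # return the first file that exists, as the loop above does.
    for arch in archs:
        candidate = os.path.join(manifest_dir,
                                 "manifest-%s-%s.deploy" % (arch, recipe))
        if os.path.exists(candidate):
            return candidate
    return None

# e.g. find_deploy_manifest("tmp/sstate-control",
#                           ["qemux86_64", "core2-64", "all"], "u-boot")
```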
| 281 | |||
| 282 | def get_deployed_files(man_file): | ||
| 283 | """ | ||
| 284 | Get the files deployed from the sstate manifest | ||
| 285 | """ | ||
| 286 | |||
| 287 | dep_files = [] | ||
| 288 | excluded_files = [] | ||
| 289 | with open(man_file, "r") as manifest: | ||
| 290 | all_files = manifest.read() | ||
| 291 | for f in all_files.splitlines(): | ||
| 292 | if ((not (os.path.islink(f) or os.path.isdir(f))) and | ||
| 293 | os.path.basename(f) not in excluded_files): | ||
| 294 | dep_files.append(os.path.basename(f)) | ||
| 295 | return dep_files | ||
| 296 | |||
| 297 | ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest license_create_manifest " | ||
| 298 | do_rootfs[recrdeptask] += "do_populate_lic" | ||
| 299 | |||
| 300 | python do_populate_lic_deploy() { | ||
| 301 | license_deployed_manifest(d) | ||
| 302 | oe.qa.exit_if_errors(d) | ||
| 303 | } | ||
| 304 | |||
| 305 | addtask populate_lic_deploy before do_build after do_image_complete | ||
| 306 | do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy" | ||
| 307 | |||
| 308 | python license_qa_dead_symlink() { | ||
| 309 | import os | ||
| 310 | |||
| 311 | for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')): | ||
| 312 | for file in files: | ||
| 313 | full_path = os.path.join(root, file) | ||
| 314 | if os.path.islink(full_path) and not os.path.exists(full_path): | ||
| 315 | bb.error("broken symlink: " + full_path) | ||
| 316 | } | ||
| 317 | IMAGE_QA_COMMANDS += "license_qa_dead_symlink" | ||
diff --git a/meta/classes-recipe/linux-dummy.bbclass b/meta/classes-recipe/linux-dummy.bbclass deleted file mode 100644 index 9291533cf9..0000000000 --- a/meta/classes-recipe/linux-dummy.bbclass +++ /dev/null | |||
| @@ -1,31 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | python __anonymous () { | ||
| 8 | if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy': | ||
| 9 | # copy parts of the code from kernel.bbclass | ||
| 10 | kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel" | ||
| 11 | |||
| 12 | # create an empty kernel-devicetree package | ||
| 13 | d.appendVar('PACKAGES', ' %s-devicetree' % kname) | ||
| 14 | d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1') | ||
| 15 | |||
| 16 | # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES | ||
| 17 | type = d.getVar('KERNEL_IMAGETYPE') or "" | ||
| 18 | alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or "" | ||
| 19 | types = d.getVar('KERNEL_IMAGETYPES') or "" | ||
| 20 | if type not in types.split(): | ||
| 21 | types = (type + ' ' + types).strip() | ||
| 22 | if alttype not in types.split(): | ||
| 23 | types = (alttype + ' ' + types).strip() | ||
| 24 | |||
| 25 | # create empty kernel-image-* packages | ||
| 26 | for type in types.split(): | ||
| 27 | typelower = type.lower() | ||
| 28 | d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower)) | ||
| 29 | d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1') | ||
| 30 | } | ||
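The image-type merge in the anonymous function above just prepends each type that is not already listed. Reduced to plain Python (a sketch, not part of the class):

```python
def merge_imagetypes(imagetype, alt_imagetype, imagetypes):
    # Prepend KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE when they are
    # not already present in KERNEL_IMAGETYPES.
    types = imagetypes
    for t in (imagetype, alt_imagetype):
        if t and t not in types.split():
            types = (t + ' ' + types).strip()
    return types

print(merge_imagetypes("zImage", "uImage", ""))           # uImage zImage
print(merge_imagetypes("zImage", "", "zImage fitImage"))  # zImage fitImage
```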
| 31 | |||
diff --git a/meta/classes-recipe/linux-kernel-base.bbclass b/meta/classes-recipe/linux-kernel-base.bbclass deleted file mode 100644 index e2187a73f0..0000000000 --- a/meta/classes-recipe/linux-kernel-base.bbclass +++ /dev/null | |||
| @@ -1,62 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # parse kernel ABI version out of <linux/version.h> | ||
| 8 | def get_kernelversion_headers(p): | ||
| 9 | import re | ||
| 10 | |||
| 11 | fn = p + '/include/linux/utsrelease.h' | ||
| 12 | if not os.path.isfile(fn): | ||
| 13 | # after 2.6.33-rc1 | ||
| 14 | fn = p + '/include/generated/utsrelease.h' | ||
| 15 | if not os.path.isfile(fn): | ||
| 16 | fn = p + '/include/linux/version.h' | ||
| 17 | |||
| 18 | try: | ||
| 19 | with open(fn, 'r') as f: | ||
| 20 | l = f.readlines() | ||
| 21 | except IOError: | ||
| 22 | return None | ||
| 23 | |||
| 24 | r = re.compile(r'#define UTS_RELEASE "(.*)"') | ||
| 26 | for s in l: | ||
| 27 | m = r.match(s) | ||
| 28 | if m: | ||
| 29 | return m.group(1) | ||
| 30 | return None | ||
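For reference, the UTS_RELEASE line the regex above extracts looks like the (made-up) sample here:

```python
import re

r = re.compile(r'#define UTS_RELEASE "(.*)"')
sample = '#define UTS_RELEASE "6.6.23-yocto-standard"'  # illustrative value
m = r.match(sample)
print(m.group(1))  # -> 6.6.23-yocto-standard
```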
| 31 | |||
| 32 | |||
| 33 | def get_kernelversion_file(p): | ||
| 34 | fn = p + '/kernel-abiversion' | ||
| 35 | |||
| 36 | try: | ||
| 37 | with open(fn, 'r') as f: | ||
| 38 | return f.readlines()[0].strip() | ||
| 39 | except IOError: | ||
| 40 | return None | ||
| 41 | |||
| 42 | def get_kernellocalversion_file(p): | ||
| 43 | fn = p + '/kernel-localversion' | ||
| 44 | |||
| 45 | try: | ||
| 46 | with open(fn, 'r') as f: | ||
| 47 | return f.readlines()[0].strip() | ||
| 48 | except IOError: | ||
| 49 | return "" | ||
| 50 | |||
| 52 | |||
| 53 | def linux_module_packages(s, d): | ||
| 54 | suffix = "" | ||
| 55 | return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split())) | ||
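linux_module_packages() maps kernel module names onto the package names that kernel-module-split produces; a quick illustration of the mangling (underscores become dashes, '@' becomes '+'):

```python
def module_package_names(s, suffix=""):
    # Same mangling as linux_module_packages() above, minus the unused
    # datastore argument.
    return " ".join("kernel-module-%s%s"
                    % (m.lower().replace('_', '-').replace('@', '+'), suffix)
                    for m in s.split())

print(module_package_names("snd_usb_audio VFAT"))
# -> kernel-module-snd-usb-audio kernel-module-vfat
```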
| 56 | |||
| 57 | export KBUILD_BUILD_VERSION = "1" | ||
| 58 | export KBUILD_BUILD_USER ?= "oe-user" | ||
| 59 | export KBUILD_BUILD_HOST ?= "oe-host" | ||
| 60 | |||
| 61 | # that's all | ||
| 62 | |||
diff --git a/meta/classes-recipe/linuxloader.bbclass b/meta/classes-recipe/linuxloader.bbclass deleted file mode 100644 index a2e8f9837b..0000000000 --- a/meta/classes-recipe/linuxloader.bbclass +++ /dev/null | |||
| @@ -1,84 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | def get_musl_loader_arch(d): | ||
| 8 | import re | ||
| 9 | ldso_arch = "NotSupported" | ||
| 10 | |||
| 11 | targetarch = d.getVar("TARGET_ARCH") | ||
| 12 | if targetarch.startswith("microblaze"): | ||
| 13 | ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}" | ||
| 14 | elif targetarch.startswith("mips"): | ||
| 15 | ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}" | ||
| 16 | elif targetarch == "powerpc": | ||
| 17 | ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}" | ||
| 18 | elif targetarch.startswith("powerpc64"): | ||
| 19 | ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}" | ||
| 20 | elif targetarch == "x86_64": | ||
| 21 | ldso_arch = "x86_64" | ||
| 22 | elif re.search("i.86", targetarch): | ||
| 23 | ldso_arch = "i386" | ||
| 24 | elif targetarch.startswith("arm"): | ||
| 25 | ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}" | ||
| 26 | elif targetarch.startswith("aarch64"): | ||
| 27 | ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}" | ||
| 28 | elif targetarch.startswith("riscv64"): | ||
| 29 | ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}" | ||
| 30 | elif targetarch.startswith("riscv32"): | ||
| 31 | ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}" | ||
| 32 | return ldso_arch | ||
| 33 | |||
| 34 | def get_musl_loader(d): | ||
| 36 | return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1" | ||
| 37 | |||
| 38 | def get_glibc_loader(d): | ||
| 39 | import re | ||
| 40 | |||
| 41 | dynamic_loader = "NotSupported" | ||
| 42 | targetarch = d.getVar("TARGET_ARCH") | ||
| 43 | if targetarch in ["powerpc", "microblaze"]: | ||
| 44 | dynamic_loader = "${base_libdir}/ld.so.1" | ||
| 45 | elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]: | ||
| 46 | dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1" | ||
| 47 | elif targetarch.startswith("mips"): | ||
| 48 | dynamic_loader = "${base_libdir}/ld.so.1" | ||
| 49 | elif targetarch.startswith("loongarch64"): | ||
| 50 | dynamic_loader = "${base_libdir}/ld-linux-loongarch-lp64d.so.1" | ||
| 51 | elif targetarch == "powerpc64le": | ||
| 52 | dynamic_loader = "${base_libdir}/ld64.so.2" | ||
| 53 | elif targetarch == "powerpc64": | ||
| 54 | dynamic_loader = "${base_libdir}/ld64.so.1" | ||
| 55 | elif targetarch == "x86_64": | ||
| 56 | dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2" | ||
| 57 | elif re.search("i.86", targetarch): | ||
| 58 | dynamic_loader = "${base_libdir}/ld-linux.so.2" | ||
| 59 | elif targetarch == "arm": | ||
| 60 | dynamic_loader = "${base_libdir}/ld-linux${@['', '-armhf'][d.getVar('TARGET_FPU') == 'hard']}.so.3" | ||
| 61 | elif targetarch.startswith("aarch64"): | ||
| 62 | dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1" | ||
| 63 | elif targetarch.startswith("riscv64"): | ||
| 64 | dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1" | ||
| 65 | elif targetarch.startswith("riscv32"): | ||
| 66 | dynamic_loader = "${base_libdir}/ld-linux-riscv32-ilp32${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1" | ||
| 67 | return dynamic_loader | ||
| 68 | |||
| 69 | def get_linuxloader(d): | ||
| 70 | overrides = d.getVar("OVERRIDES").split(":") | ||
| 71 | |||
| 72 | if "libc-baremetal" in overrides: | ||
| 73 | return "NotSupported" | ||
| 74 | |||
| 75 | if "libc-musl" in overrides: | ||
| 76 | dynamic_loader = get_musl_loader(d) | ||
| 77 | else: | ||
| 78 | dynamic_loader = get_glibc_loader(d) | ||
| 79 | return dynamic_loader | ||
| 80 | |||
| 81 | get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}" | ||
| 82 | get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}" | ||
| 83 | get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}" | ||
| 84 | get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}" | ||
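get_linuxloader() simply dispatches on the libc override; a sketch of the selection plus example values (the real return strings still contain unexpanded BitBake variables such as ${base_libdir}):

```python
def pick_loader(overrides, musl_loader, glibc_loader):
    # Same selection order as get_linuxloader() above.
    if "libc-baremetal" in overrides:
        return "NotSupported"
    return musl_loader if "libc-musl" in overrides else glibc_loader

print(pick_loader(["libc-musl"], "/lib/ld-musl-x86_64.so.1",
                  "${base_libdir}/ld-linux-x86-64.so.2"))
# -> /lib/ld-musl-x86_64.so.1
```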
diff --git a/meta/classes-recipe/live-vm-common.bbclass b/meta/classes-recipe/live-vm-common.bbclass deleted file mode 100644 index d90cc67ebc..0000000000 --- a/meta/classes-recipe/live-vm-common.bbclass +++ /dev/null | |||
| @@ -1,100 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Some of the variables for the VM and live images conflict; this function | ||
| 8 | # is used to resolve the problem. | ||
| 9 | def set_live_vm_vars(d, suffix): | ||
| 10 | vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD'] | ||
| 11 | for var in vars: | ||
| 12 | var_with_suffix = var + '_' + suffix | ||
| 13 | if d.getVar(var): | ||
| 14 | bb.warn('Found potentially conflicting var %s, please use %s rather than %s' % \ | ||
| 15 | (var, var_with_suffix, var)) | ||
| 16 | elif d.getVar(var_with_suffix): | ||
| 17 | d.setVar(var, d.getVar(var_with_suffix)) | ||
| 18 | |||
| 19 | |||
| 20 | EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}" | ||
| 21 | EFI_PROVIDER ?= "grub-efi" | ||
| 22 | EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}" | ||
| 23 | |||
| 24 | MKDOSFS_EXTRAOPTS ??= "-S 512" | ||
| 25 | |||
| 26 | # Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not | ||
| 27 | # contain "efi". This way legacy is supported by default if neither is | ||
| 28 | # specified, maintaining the original behavior. | ||
| 29 | def pcbios(d): | ||
| 30 | pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d) | ||
| 31 | if pcbios == "0": | ||
| 32 | pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d) | ||
| 33 | return pcbios | ||
| 34 | |||
| 35 | PCBIOS = "${@pcbios(d)}" | ||
| 36 | PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}" | ||
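The default logic of pcbios() above is easiest to see as a truth table; a standalone sketch covering the four MACHINE_FEATURES combinations:

```python
def pcbios(features):
    # Legacy boot is on when "pcbios" is present, or when "efi" is absent.
    if "pcbios" in features:
        return "1"
    return "0" if "efi" in features else "1"

for feats in ([], ["efi"], ["pcbios"], ["efi", "pcbios"]):
    print(feats, "->", pcbios(feats))
# []                 -> 1  (legacy by default)
# ['efi']            -> 0
# ['pcbios']         -> 1
# ['efi', 'pcbios']  -> 1
```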
| 37 | |||
| 38 | # efi_populate_common DEST BOOTLOADER | ||
| 39 | efi_populate_common() { | ||
| 40 | # DEST must be the root of the image so that EFIDIR is not | ||
| 41 | # nested under a top level directory. | ||
| 42 | DEST=$1 | ||
| 43 | |||
| 44 | install -d ${DEST}${EFIDIR} | ||
| 45 | |||
| 46 | install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE} | ||
| 47 | EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g') | ||
| 48 | printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh | ||
| 49 | } | ||
| 50 | |||
| 51 | efi_iso_populate() { | ||
| 52 | iso_dir=$1 | ||
| 53 | efi_populate $iso_dir | ||
| 54 | # Build an EFI directory to create efi.img | ||
| 55 | mkdir -p ${EFIIMGDIR}/${EFIDIR} | ||
| 56 | cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR} | ||
| 57 | cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR} | ||
| 58 | |||
| 59 | EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g') | ||
| 60 | printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh | ||
| 61 | |||
| 62 | if [ -f "$iso_dir/initrd" ] ; then | ||
| 63 | cp $iso_dir/initrd ${EFIIMGDIR} | ||
| 64 | fi | ||
| 65 | } | ||
| 66 | |||
| 67 | efi_hddimg_populate() { | ||
| 68 | efi_populate $1 | ||
| 69 | } | ||
| 70 | |||
| 71 | inherit_defer ${EFI_CLASS} | ||
| 72 | inherit_defer ${PCBIOS_CLASS} | ||
| 73 | |||
| 74 | populate_kernel() { | ||
| 75 | dest=$1 | ||
| 76 | install -d $dest | ||
| 77 | |||
| 78 | # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use. | ||
| 79 | bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}" | ||
| 80 | if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then | ||
| 81 | install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE} | ||
| 82 | else | ||
| 83 | bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist" | ||
| 84 | fi | ||
| 85 | |||
| 86 | # initrd is made by concatenating multiple filesystem images | ||
| 87 | if [ -n "${INITRD}" ]; then | ||
| 88 | rm -f $dest/initrd | ||
| 89 | for fs in ${INITRD} | ||
| 90 | do | ||
| 91 | if [ -s "$fs" ]; then | ||
| 92 | cat $fs >> $dest/initrd | ||
| 93 | else | ||
| 94 | bbfatal "$fs is invalid. initrd image creation failed." | ||
| 95 | fi | ||
| 96 | done | ||
| 97 | chmod 0644 $dest/initrd | ||
| 98 | fi | ||
| 99 | } | ||
| 100 | |||
diff --git a/meta/classes-recipe/manpages.bbclass b/meta/classes-recipe/manpages.bbclass deleted file mode 100644 index f3d034b046..0000000000 --- a/meta/classes-recipe/manpages.bbclass +++ /dev/null | |||
| @@ -1,41 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Inherit this class to enable or disable building and installation of manpages | ||
| 8 | # depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building | ||
| 9 | # tends to pull in the entire XML stack and other tools, so it's not enabled | ||
| 10 | # by default. | ||
| 11 | PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}" | ||
| 12 | |||
| 13 | PACKAGE_WRITE_DEPS += "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'qemuwrapper-cross', '', d)}" | ||
| 14 | |||
| 15 | # manual files are usually packaged in ${PN}-doc, except for man-pages | ||
| 16 | MAN_PKG ?= "${PN}-doc" | ||
| 17 | |||
| 18 | # only add man-db to RDEPENDS when manual files are built and installed | ||
| 19 | RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}" | ||
| 20 | |||
| 21 | pkg_postinst:${MAN_PKG}:append () { | ||
| 22 | # only update manual page index caches when manual files are built and installed | ||
| 23 | if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then | ||
| 24 | if test -n "$D"; then | ||
| 25 | if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then | ||
| 26 | $INTERCEPT_DIR/postinst_intercept update_mandb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} bindir=${bindir} sysconfdir=${sysconfdir} mandir=${mandir} | ||
| 27 | else | ||
| 28 | $INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX} | ||
| 29 | fi | ||
| 30 | else | ||
| 31 | mandb -q | ||
| 32 | fi | ||
| 33 | fi | ||
| 34 | } | ||
| 35 | |||
| 36 | pkg_postrm:${MAN_PKG}:append () { | ||
| 37 | # only update manual page index caches when manual files are built and installed | ||
| 38 | if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then | ||
| 39 | mandb -q | ||
| 40 | fi | ||
| 41 | } | ||
diff --git a/meta/classes-recipe/meson-routines.bbclass b/meta/classes-recipe/meson-routines.bbclass deleted file mode 100644 index a944a8fff1..0000000000 --- a/meta/classes-recipe/meson-routines.bbclass +++ /dev/null | |||
| @@ -1,59 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit siteinfo | ||
| 8 | |||
| 9 | def meson_array(var, d): | ||
| 10 | items = d.getVar(var).split() | ||
| 11 | return repr(items[0] if len(items) == 1 else items) | ||
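meson_array() exists because Meson wants multi-word tool settings expressed as arrays; a quick illustration of the repr() trick with the datastore replaced by a plain string:

```python
def meson_array(value):
    # A single word stays a quoted string; multiple words become a
    # Python-literal list, which is also valid Meson syntax.
    items = value.split()
    return repr(items[0] if len(items) == 1 else items)

print(meson_array("gcc"))                         # 'gcc'
print(meson_array("x86_64-poky-linux-gcc -m64"))
# ['x86_64-poky-linux-gcc', '-m64']
```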
| 12 | |||
| 13 | # Map our ARCH values to what Meson expects: | ||
| 14 | # http://mesonbuild.com/Reference-tables.html#cpu-families | ||
| 15 | def meson_cpu_family(var, d): | ||
| 16 | import re | ||
| 17 | arch = d.getVar(var) | ||
| 18 | if arch == 'powerpc': | ||
| 19 | return 'ppc' | ||
| 20 | elif arch == 'powerpc64' or arch == 'powerpc64le': | ||
| 21 | return 'ppc64' | ||
| 22 | elif arch == 'armeb': | ||
| 23 | return 'arm' | ||
| 24 | elif arch == 'aarch64_be': | ||
| 25 | return 'aarch64' | ||
| 26 | elif arch == 'loongarch64': | ||
| 27 | return 'loongarch64' | ||
| 28 | elif arch == 'mipsel': | ||
| 29 | return 'mips' | ||
| 30 | elif arch == 'mips64el': | ||
| 31 | return 'mips64' | ||
| 32 | elif re.match(r"i[3-6]86", arch): | ||
| 33 | return "x86" | ||
| 34 | elif arch == "microblazeel": | ||
| 35 | return "microblaze" | ||
| 36 | else: | ||
| 37 | return arch | ||
| 38 | |||
| 39 | # Map our OS values to what Meson expects: | ||
| 40 | # https://mesonbuild.com/Reference-tables.html#operating-system-names | ||
| 41 | def meson_operating_system(var, d): | ||
| 42 | os = d.getVar(var) | ||
| 43 | if "mingw" in os: | ||
| 44 | return "windows" | ||
| 45 | # avoid e.g. 'linux-gnueabi' | ||
| 46 | elif "linux" in os: | ||
| 47 | return "linux" | ||
| 48 | else: | ||
| 49 | return os | ||
| 50 | |||
| 51 | def meson_endian(prefix, d): | ||
| 52 | arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS") | ||
| 53 | sitedata = siteinfo_data_for_machine(arch, os, d) | ||
| 54 | if "endian-little" in sitedata: | ||
| 55 | return "little" | ||
| 56 | elif "endian-big" in sitedata: | ||
| 57 | return "big" | ||
| 58 | else: | ||
| 59 | bb.fatal("Cannot determine endianness for %s-%s" % (arch, os)) | ||
diff --git a/meta/classes-recipe/meson.bbclass b/meta/classes-recipe/meson.bbclass deleted file mode 100644 index c8b3e1ec29..0000000000 --- a/meta/classes-recipe/meson.bbclass +++ /dev/null | |||
| @@ -1,203 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python3native meson-routines qemu | ||
| 8 | |||
| 9 | DEPENDS:append = " meson-native ninja-native" | ||
| 10 | |||
| 11 | EXEWRAPPER_ENABLED:class-native = "False" | ||
| 12 | EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}" | ||
| 13 | DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}" | ||
| 14 | |||
| 15 | # As Meson enforces out-of-tree builds we can just use cleandirs | ||
| 16 | B = "${WORKDIR}/build" | ||
| 17 | do_configure[cleandirs] = "${B}" | ||
| 18 | |||
| 19 | # Where the meson.build build configuration is | ||
| 20 | MESON_SOURCEPATH = "${S}" | ||
| 21 | |||
| 22 | # The target to build in do_compile. If unset the default targets are built. | ||
| 23 | MESON_TARGET ?= "" | ||
| 24 | |||
| 25 | # Since 0.60.0 you can specify custom tags to install | ||
| 26 | MESON_INSTALL_TAGS ?= "" | ||
| 27 | |||
| 28 | def noprefix(var, d): | ||
| 29 | return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1) | ||
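noprefix() strips the leading ${prefix} because Meson expects most directory options to be relative to --prefix; a trivial sketch:

```python
def noprefix(path, prefix):
    # "/usr/bin" with prefix "/usr" becomes "bin"; only the first
    # occurrence is replaced, as in the class above.
    return path.replace(prefix + '/', '', 1)

print(noprefix("/usr/bin", "/usr"))            # bin
print(noprefix("/usr/lib/pkgconfig", "/usr"))  # lib/pkgconfig
```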
| 30 | |||
| 31 | MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}" | ||
| 32 | MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD" | ||
| 33 | MESONOPTS = " --prefix ${prefix} \ | ||
| 34 | --buildtype ${MESON_BUILDTYPE} \ | ||
| 35 | --bindir ${@noprefix('bindir', d)} \ | ||
| 36 | --sbindir ${@noprefix('sbindir', d)} \ | ||
| 37 | --datadir ${@noprefix('datadir', d)} \ | ||
| 38 | --libdir ${@noprefix('libdir', d)} \ | ||
| 39 | --libexecdir ${@noprefix('libexecdir', d)} \ | ||
| 40 | --includedir ${@noprefix('includedir', d)} \ | ||
| 41 | --mandir ${@noprefix('mandir', d)} \ | ||
| 42 | --infodir ${@noprefix('infodir', d)} \ | ||
| 43 | --sysconfdir ${sysconfdir} \ | ||
| 44 | --localstatedir ${localstatedir} \ | ||
| 45 | --sharedstatedir ${sharedstatedir} \ | ||
| 46 | --wrap-mode nodownload \ | ||
| 47 | --native-file ${WORKDIR}/meson.native" | ||
| 48 | |||
| 49 | EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}" | ||
| 50 | |||
| 51 | MESON_CROSS_FILE = "" | ||
| 52 | MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross" | ||
| 53 | MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross" | ||
| 54 | |||
| 55 | # Needed to set up qemu wrapper below | ||
| 56 | export STAGING_DIR_HOST | ||
| 57 | |||
| 58 | def rust_tool(d, target_var): | ||
| 59 | rustc = d.getVar('RUSTC') | ||
| 60 | if not rustc: | ||
| 61 | return "" | ||
| 62 | cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split() | ||
| 63 | return "rust = %s" % repr(cmd) | ||
| 64 | |||
| 65 | def bindgen_args(d): | ||
| 66 | args = '${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} --target=${TARGET_SYS}' | ||
| 67 | # For SDK packages TOOLCHAIN_OPTIONS don't contain full sysroot path | ||
| 68 | if bb.data.inherits_class("nativesdk", d): | ||
| 69 | args += ' --sysroot=${STAGING_DIR_HOST}${SDKPATHNATIVE}${prefix_nativesdk}' | ||
| 70 | items = d.expand(args).split() | ||
| 71 | return repr(items[0] if len(items) == 1 else items) | ||
| 72 | |||
| 73 | addtask write_config before do_configure | ||
| 74 | do_write_config[vardeps] += "CC CXX AR NM STRIP READELF OBJCOPY CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS EXEWRAPPER_ENABLED" | ||
| 75 | do_write_config() { | ||
| 76 | # This needs to be Python to split the args into single-element lists | ||
| 77 | cat >${WORKDIR}/meson.cross <<EOF | ||
| 78 | [binaries] | ||
| 79 | c = ${@meson_array('CC', d)} | ||
| 80 | cpp = ${@meson_array('CXX', d)} | ||
| 81 | cython = 'cython3' | ||
| 82 | ar = ${@meson_array('AR', d)} | ||
| 83 | nm = ${@meson_array('NM', d)} | ||
| 84 | strip = ${@meson_array('STRIP', d)} | ||
| 85 | readelf = ${@meson_array('READELF', d)} | ||
| 86 | objcopy = ${@meson_array('OBJCOPY', d)} | ||
| 87 | pkg-config = 'pkg-config' | ||
| 88 | llvm-config = 'llvm-config' | ||
| 89 | cups-config = 'cups-config' | ||
| 90 | g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper' | ||
| 91 | g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper' | ||
| 92 | ${@rust_tool(d, "RUST_HOST_SYS")} | ||
| 93 | ${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""} | ||
| 94 | |||
| 95 | [built-in options] | ||
| 96 | c_args = ${@meson_array('CFLAGS', d)} | ||
| 97 | c_link_args = ${@meson_array('LDFLAGS', d)} | ||
| 98 | cpp_args = ${@meson_array('CXXFLAGS', d)} | ||
| 99 | cpp_link_args = ${@meson_array('LDFLAGS', d)} | ||
| 100 | |||
| 101 | [properties] | ||
| 102 | needs_exe_wrapper = true | ||
| 103 | sys_root = '${STAGING_DIR_HOST}' | ||
| 104 | bindgen_clang_arguments = ${@bindgen_args(d)} | ||
| 105 | |||
| 106 | [host_machine] | ||
| 107 | system = '${@meson_operating_system('HOST_OS', d)}' | ||
| 108 | cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}' | ||
| 109 | cpu = '${HOST_ARCH}' | ||
| 110 | endian = '${@meson_endian('HOST', d)}' | ||
| 111 | |||
| 112 | [target_machine] | ||
| 113 | system = '${@meson_operating_system('TARGET_OS', d)}' | ||
| 114 | cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}' | ||
| 115 | cpu = '${TARGET_ARCH}' | ||
| 116 | endian = '${@meson_endian('TARGET', d)}' | ||
| 117 | EOF | ||
| 118 | |||
| 119 | cat >${WORKDIR}/meson.native <<EOF | ||
| 120 | [binaries] | ||
| 121 | c = ${@meson_array('BUILD_CC', d)} | ||
| 122 | cpp = ${@meson_array('BUILD_CXX', d)} | ||
| 123 | cython = 'cython3' | ||
| 124 | ar = ${@meson_array('BUILD_AR', d)} | ||
| 125 | nm = ${@meson_array('BUILD_NM', d)} | ||
| 126 | strip = ${@meson_array('BUILD_STRIP', d)} | ||
| 127 | readelf = ${@meson_array('BUILD_READELF', d)} | ||
| 128 | objcopy = ${@meson_array('BUILD_OBJCOPY', d)} | ||
| 129 | llvm-config = '${STAGING_BINDIR_NATIVE}/llvm-config' | ||
| 130 | pkg-config = 'pkg-config-native' | ||
| 131 | ${@rust_tool(d, "RUST_BUILD_SYS")} | ||
| 132 | |||
| 133 | [built-in options] | ||
| 134 | c_args = ${@meson_array('BUILD_CFLAGS', d)} | ||
| 135 | c_link_args = ${@meson_array('BUILD_LDFLAGS', d)} | ||
| 136 | cpp_args = ${@meson_array('BUILD_CXXFLAGS', d)} | ||
| 137 | cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)} | ||
| 138 | EOF | ||
| 139 | } | ||
| 140 | |||
| 141 | write_qemuwrapper() { | ||
| 142 | # Write out a qemu wrapper that will be used as exe_wrapper so that meson | ||
| 143 | # can run target helper binaries through that. | ||
| 144 | qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}" | ||
| 145 | cat > ${WORKDIR}/meson-qemuwrapper << EOF | ||
| 146 | #!/bin/sh | ||
| 147 | # Use a modules directory which doesn't exist so we don't load random things | ||
| 148 | # which may then get deleted (or their dependencies) and potentially segfault | ||
| 149 | export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy | ||
| 150 | |||
| 151 | # meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly | ||
| 152 | unset LD_LIBRARY_PATH | ||
| 153 | |||
| 154 | $qemu_binary "\$@" | ||
| 155 | EOF | ||
| 156 | chmod +x ${WORKDIR}/meson-qemuwrapper | ||
| 157 | } | ||
| 158 | |||
| 159 | do_write_config:append:class-target() { | ||
| 160 | write_qemuwrapper | ||
| 161 | } | ||
| 162 | |||
| 163 | do_write_config:append:class-nativesdk() { | ||
| 164 | write_qemuwrapper | ||
| 165 | } | ||
| 166 | |||
| 167 | # Tell externalsrc that changes to this file require a reconfigure | ||
| 168 | CONFIGURE_FILES = "meson.build" | ||
| 169 | |||
| 170 | meson_do_configure() { | ||
| 171 | # Meson requires this to be 'bfd', 'lld' or 'gold' from 0.53 onwards | ||
| 172 | # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3 | ||
| 173 | unset LD | ||
| 174 | |||
| 175 | bbnote Executing meson ${EXTRA_OEMESON}... | ||
| 176 | if ! meson setup ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then | ||
| 177 | bbfatal_log meson failed | ||
| 178 | fi | ||
| 179 | } | ||
| 180 | |||
| 181 | python meson_do_qa_configure() { | ||
| 182 | import re | ||
| 183 | warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE) | ||
| 184 | with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile: | ||
| 185 | log = logfile.read() | ||
| 186 | for (prop, value) in warn_re.findall(log): | ||
| 187 | bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value)) | ||
| 188 | } | ||
| 189 | do_configure[postfuncs] += "meson_do_qa_configure" | ||
| 190 | |||
| 191 | do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+" | ||
| 192 | meson_do_compile() { | ||
| 193 | meson compile -v ${PARALLEL_MAKE} ${MESON_TARGET} | ||
| 194 | } | ||
| 195 | |||
| 196 | meson_do_install() { | ||
| 197 | if [ "x${MESON_INSTALL_TAGS}" != "x" ] ; then | ||
| 198 | meson_install_tags="--tags ${MESON_INSTALL_TAGS}" | ||
| 199 | fi | ||
| 200 | meson install --destdir ${D} --no-rebuild $meson_install_tags | ||
| 201 | } | ||
| 202 | |||
| 203 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
diff --git a/meta/classes-recipe/mime-xdg.bbclass b/meta/classes-recipe/mime-xdg.bbclass deleted file mode 100644 index cbdcb4c7e9..0000000000 --- a/meta/classes-recipe/mime-xdg.bbclass +++ /dev/null | |||
| @@ -1,78 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | # This class creates mime <-> application associations based on entry | ||
| 7 | # 'MimeType' in *.desktop files | ||
| 8 | # | ||
| 9 | |||
| 10 | DEPENDS += "desktop-file-utils" | ||
| 11 | PACKAGE_WRITE_DEPS += "desktop-file-utils-native" | ||
| 12 | DESKTOPDIR = "${datadir}/applications" | ||
| 13 | |||
| 14 | # There are recipes out there installing their .desktop files as absolute | ||
| 15 | # symlinks. For us these are dangling and cannot be introspected for "MimeType" | ||
| 16 | # easily. By adding package names to MIME_XDG_PACKAGES, the packager can force | ||
| 17 | # proper update-desktop-database handling. Note that all introspection is | ||
| 18 | # skipped when MIME_XDG_PACKAGES is not empty. | ||
| 19 | MIME_XDG_PACKAGES ?= "" | ||
| 20 | |||
| 21 | mime_xdg_postinst() { | ||
| 22 | if [ "x$D" != "x" ]; then | ||
| 23 | $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \ | ||
| 24 | mlprefix=${MLPREFIX} \ | ||
| 25 | desktop_dir=${DESKTOPDIR} | ||
| 26 | else | ||
| 27 | update-desktop-database $D${DESKTOPDIR} | ||
| 28 | fi | ||
| 29 | } | ||
| 30 | |||
| 31 | mime_xdg_postrm() { | ||
| 32 | if [ "x$D" != "x" ]; then | ||
| 33 | $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \ | ||
| 34 | mlprefix=${MLPREFIX} \ | ||
| 35 | desktop_dir=${DESKTOPDIR} | ||
| 36 | else | ||
| 37 | update-desktop-database $D${DESKTOPDIR} | ||
| 38 | fi | ||
| 39 | } | ||
| 40 | |||
| 41 | python populate_packages:append () { | ||
| 42 | packages = d.getVar('PACKAGES').split() | ||
| 43 | pkgdest = d.getVar('PKGDEST') | ||
| 44 | desktop_base = d.getVar('DESKTOPDIR') | ||
| 45 | forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split() | ||
| 46 | |||
| 47 | for pkg in packages: | ||
| 48 | desktops_with_mime_found = pkg in forced_mime_xdg_pkgs | ||
| 49 | if d.getVar('MIME_XDG_PACKAGES') == '': | ||
| 50 | desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base) | ||
| 51 | if os.path.exists(desktop_dir): | ||
| 52 | for df in os.listdir(desktop_dir): | ||
| 53 | if df.endswith('.desktop'): | ||
| 54 | try: | ||
| 55 | with open(desktop_dir + '/' + df, 'r') as f: | ||
| 56 | for line in f.read().split('\n'): | ||
| 57 | if 'MimeType' in line: | ||
| 58 | desktops_with_mime_found = True | ||
| 59 | break | ||
| 60 | except Exception: | ||
| 61 | bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % (desktop_dir + '/' + df)) | ||
| 62 | if desktops_with_mime_found: | ||
| 63 | break | ||
| 64 | if desktops_with_mime_found: | ||
| 65 | bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg) | ||
| 66 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 67 | if not postinst: | ||
| 68 | postinst = '#!/bin/sh\n' | ||
| 69 | postinst += d.getVar('mime_xdg_postinst') | ||
| 70 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 71 | postrm = d.getVar('pkg_postrm:%s' % pkg) | ||
| 72 | if not postrm: | ||
| 73 | postrm = '#!/bin/sh\n' | ||
| 74 | postrm += d.getVar('mime_xdg_postrm') | ||
| 75 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 76 | bb.note("adding desktop-file-utils dependency to %s" % pkg) | ||
| 77 | d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils") | ||
| 78 | } | ||
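The introspection above only looks for a line containing 'MimeType' in each .desktop file; a hedged standalone sketch of that check, with a made-up entry that would match shown in the comments:

```python
def desktop_declares_mime(path):
    # True if any line mentions "MimeType", which is all the scan above
    # requires before wiring up update-desktop-database handling.
    with open(path) as f:
        return any('MimeType' in line for line in f)

# An illustrative .desktop entry that would match:
#   [Desktop Entry]
#   Name=Example Viewer
#   MimeType=image/png;image/jpeg;
```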
diff --git a/meta/classes-recipe/mime.bbclass b/meta/classes-recipe/mime.bbclass deleted file mode 100644 index 9b13f62bda..0000000000 --- a/meta/classes-recipe/mime.bbclass +++ /dev/null | |||
| @@ -1,76 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This class is used by recipes installing mime types | ||
| 9 | # | ||
| 10 | |||
| 11 | DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}" | ||
| 12 | PACKAGE_WRITE_DEPS += "shared-mime-info-native" | ||
| 13 | MIMEDIR = "${datadir}/mime" | ||
| 14 | |||
| 15 | mime_postinst() { | ||
| 16 | if [ "x$D" != "x" ]; then | ||
| 17 | $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \ | ||
| 18 | mlprefix=${MLPREFIX} \ | ||
| 19 | mimedir=${MIMEDIR} | ||
| 20 | else | ||
| 21 | echo "Updating MIME database... this may take a while." | ||
| 22 | update-mime-database $D${MIMEDIR} | ||
| 23 | fi | ||
| 24 | } | ||
| 25 | |||
| 26 | mime_postrm() { | ||
| 27 | if [ "x$D" != "x" ]; then | ||
| 28 | $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \ | ||
| 29 | mlprefix=${MLPREFIX} \ | ||
| 30 | mimedir=${MIMEDIR} | ||
| 31 | else | ||
| 32 | echo "Updating MIME database... this may take a while." | ||
| 33 | # $D${MIMEDIR}/packages belongs to the shared-mime-info-data package; | ||
| 34 | # packages like libfm-mime depend on shared-mime-info-data. | ||
| 35 | # After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages | ||
| 36 | # is removed, but update-mime-database needs this dir to update the | ||
| 37 | # database, so as a workaround create one and remove it later. | ||
| 38 | if [ ! -d $D${MIMEDIR}/packages ]; then | ||
| 39 | mkdir -p $D${MIMEDIR}/packages | ||
| 40 | update-mime-database $D${MIMEDIR} | ||
| 41 | rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages | ||
| 42 | else | ||
| 43 | update-mime-database $D${MIMEDIR} | ||
| 44 | fi | ||
| 45 | fi | ||
| 46 | } | ||
| 47 | |||
| 48 | python populate_packages:append () { | ||
| 49 | packages = d.getVar('PACKAGES').split() | ||
| 50 | pkgdest = d.getVar('PKGDEST') | ||
| 51 | mimedir = d.getVar('MIMEDIR') | ||
| 52 | |||
| 53 | for pkg in packages: | ||
| 54 | mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir) | ||
| 55 | mime_types_found = False | ||
| 56 | if os.path.exists(mime_packages_dir): | ||
| 57 | for f in os.listdir(mime_packages_dir): | ||
| 58 | if f.endswith('.xml'): | ||
| 59 | mime_types_found = True | ||
| 60 | break | ||
| 61 | if mime_types_found: | ||
| 62 | bb.note("adding mime postinst and postrm scripts to %s" % pkg) | ||
| 63 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 64 | if not postinst: | ||
| 65 | postinst = '#!/bin/sh\n' | ||
| 66 | postinst += d.getVar('mime_postinst') | ||
| 67 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 68 | postrm = d.getVar('pkg_postrm:%s' % pkg) | ||
| 69 | if not postrm: | ||
| 70 | postrm = '#!/bin/sh\n' | ||
| 71 | postrm += d.getVar('mime_postrm') | ||
| 72 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 73 | if pkg != 'shared-mime-info-data': | ||
| 74 | bb.note("adding shared-mime-info-data dependency to %s" % pkg) | ||
| 75 | d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data") | ||
| 76 | } | ||
diff --git a/meta/classes-recipe/module-base.bbclass b/meta/classes-recipe/module-base.bbclass deleted file mode 100644 index 2a225881ba..0000000000 --- a/meta/classes-recipe/module-base.bbclass +++ /dev/null | |||
| @@ -1,28 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit kernel-arch | ||
| 8 | |||
| 9 | # We do the dependency this way because the output is not preserved | ||
| 10 | # in sstate, so we must force do_compile to run (once). | ||
| 11 | do_configure[depends] += "make-mod-scripts:do_compile" | ||
| 12 | |||
| 13 | export OS = "${TARGET_OS}" | ||
| 14 | export CROSS_COMPILE = "${TARGET_PREFIX}" | ||
| 15 | |||
| 16 | # This points to the build artefacts from the main kernel build, | ||
| 17 | # such as .config and System.map. | ||
| 18 | # Confusingly, it is not the module build output (which is ${B}), but | ||
| 19 | # we didn't pick the name. | ||
| 20 | export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}" | ||
| 21 | |||
| 22 | export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}" | ||
| 23 | export LOCALVERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-localversion')}" | ||
| 24 | KERNEL_OBJECT_SUFFIX = ".ko" | ||
| 25 | |||
| 26 | # kernel modules are generally machine specific | ||
| 27 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 28 | |||
diff --git a/meta/classes-recipe/module.bbclass b/meta/classes-recipe/module.bbclass deleted file mode 100644 index 4948e995c5..0000000000 --- a/meta/classes-recipe/module.bbclass +++ /dev/null | |||
| @@ -1,89 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit module-base kernel-module-split pkgconfig | ||
| 8 | |||
| 9 | EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}" | ||
| 10 | |||
| 11 | MODULES_INSTALL_TARGET ?= "modules_install" | ||
| 12 | MODULES_MODULE_SYMVERS_LOCATION ?= "" | ||
| 13 | |||
| 14 | python __anonymous () { | ||
| 15 | depends = d.getVar('DEPENDS') | ||
| 16 | extra_symbols = [] | ||
| 17 | for dep in depends.split(): | ||
| 18 | if dep.startswith("kernel-module-"): | ||
| 19 | extra_symbols.append("${STAGING_INCDIR}/" + dep + "/Module.symvers") | ||
| 20 | d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols)) | ||
| 21 | } | ||
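The anonymous function above derives KBUILD_EXTRA_SYMBOLS from DEPENDS so an out-of-tree module can resolve symbols exported by other module recipes; a datastore-free sketch:

```python
def extra_symbols(depends, staging_incdir):
    # Every "kernel-module-*" entry in DEPENDS contributes the
    # Module.symvers that its recipe installed under STAGING_INCDIR.
    return " ".join(staging_incdir + "/" + dep + "/Module.symvers"
                    for dep in depends.split()
                    if dep.startswith("kernel-module-"))

print(extra_symbols("virtual/kernel kernel-module-foo",
                    "/sysroot/usr/include"))
# -> /sysroot/usr/include/kernel-module-foo/Module.symvers
```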
| 22 | |||
| 23 | python do_package:prepend () { | ||
| 24 | os.environ['STRIP'] = d.getVar('KERNEL_STRIP') | ||
| 25 | } | ||
| 26 | |||
| 27 | python do_devshell:prepend () { | ||
| 28 | os.environ['CFLAGS'] = '' | ||
| 29 | os.environ['CPPFLAGS'] = '' | ||
| 30 | os.environ['CXXFLAGS'] = '' | ||
| 31 | os.environ['LDFLAGS'] = '' | ||
| 32 | |||
| 33 | os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR') | ||
| 34 | os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR') | ||
| 35 | os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION') | ||
| 36 | os.environ['CC'] = d.getVar('KERNEL_CC') | ||
| 37 | os.environ['LD'] = d.getVar('KERNEL_LD') | ||
| 38 | os.environ['AR'] = d.getVar('KERNEL_AR') | ||
| 39 | os.environ['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY') | ||
| 40 | os.environ['STRIP'] = d.getVar('KERNEL_STRIP') | ||
| 41 | os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR') | ||
| 42 | kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS') | ||
| 43 | if kbuild_extra_symbols: | ||
| 44 | os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols | ||
| 45 | else: | ||
| 46 | os.environ['KBUILD_EXTRA_SYMBOLS'] = '' | ||
| 47 | } | ||
| 48 | |||
| 49 | module_do_compile() { | ||
| 50 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS | ||
| 51 | oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \ | ||
| 52 | KERNEL_VERSION=${KERNEL_VERSION} \ | ||
| 53 | CC="${KERNEL_CC}" LD="${KERNEL_LD}" \ | ||
| 54 | AR="${KERNEL_AR}" OBJCOPY="${KERNEL_OBJCOPY}" \ | ||
| 55 | STRIP="${KERNEL_STRIP}" \ | ||
| 56 | O=${STAGING_KERNEL_BUILDDIR} \ | ||
| 57 | KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \ | ||
| 58 | ${MAKE_TARGETS} | ||
| 59 | } | ||
| 60 | |||
| 61 | module_do_install() { | ||
| 62 | unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS | ||
| 63 | oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \ | ||
| 64 | INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \ | ||
| 65 | CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" \ | ||
| 66 | STRIP="${KERNEL_STRIP}" \ | ||
| 67 | O=${STAGING_KERNEL_BUILDDIR} \ | ||
| 68 | KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \ | ||
| 69 | ${MODULES_INSTALL_TARGET} | ||
| 70 | |||
| 71 | if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then | ||
| 72 | bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}" | ||
| 73 | bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a" | ||
| 74 | bbwarn "directory below B to get correct inter-module dependencies" | ||
| 75 | else | ||
| 76 | install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers | ||
| 77 | # Module.symvers contains absolute paths to the build directory. | ||
| 78 | # While it doesn't actually seem to matter which path is specified, | ||
| 79 | # clear them out to avoid confusion | ||
| 80 | sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers | ||
| 81 | fi | ||
| 82 | } | ||
| 83 | |||
| 84 | EXPORT_FUNCTIONS do_compile do_install | ||
| 85 | |||
| 86 | # add all split modules to PN RDEPENDS; PN can be empty now | ||
| 87 | KERNEL_MODULES_META_PACKAGE = "${PN}" | ||
| 88 | FILES:${PN} = "" | ||
| 89 | ALLOW_EMPTY:${PN} = "1" | ||
diff --git a/meta/classes-recipe/multilib_header.bbclass b/meta/classes-recipe/multilib_header.bbclass deleted file mode 100644 index 33f7e027f0..0000000000 --- a/meta/classes-recipe/multilib_header.bbclass +++ /dev/null | |||
| @@ -1,58 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit siteinfo | ||
| 8 | |||
| 9 | # If applicable on the architecture, this routine will rename the header and | ||
| 10 | # add a unique identifier to the name for the ABI/bitsize that is being used. | ||
| 11 | # A wrapper will be generated for the architecture that knows how to call | ||
| 12 | # all of the ABI variants for that given architecture. | ||
| 13 | # | ||
| 14 | oe_multilib_header() { | ||
| 15 | |||
| 16 | case ${HOST_OS} in | ||
| 17 | *-musl*) | ||
| 18 | return | ||
| 19 | ;; | ||
| 20 | *) | ||
| 21 | esac | ||
| 22 | # For MIPS: "n32" is a special case, which needs to be | ||
| 23 | # distinct from both 64-bit and 32-bit. | ||
| 24 | case ${TARGET_ARCH} in | ||
| 25 | mips*) case "${MIPSPKGSFX_ABI}" in | ||
| 26 | "-n32") | ||
| 27 | ident=n32 | ||
| 28 | ;; | ||
| 29 | *) | ||
| 30 | ident=${SITEINFO_BITS} | ||
| 31 | ;; | ||
| 32 | esac | ||
| 33 | ;; | ||
| 34 | *) ident=${SITEINFO_BITS} | ||
| 35 | esac | ||
| 36 | for each_header in "$@" ; do | ||
| 37 | if [ ! -f "${D}/${includedir}/$each_header" ]; then | ||
| 38 | bberror "oe_multilib_header: Unable to find header $each_header." | ||
| 39 | continue | ||
| 40 | fi | ||
| 41 | stem=$(echo $each_header | sed 's#\.h$##') | ||
| 42 | # if mips64/n32 set ident to n32 | ||
| 43 | mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h | ||
| 44 | |||
| 45 | sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header | ||
| 46 | done | ||
| 47 | } | ||
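Concretely, oe_multilib_header turns e.g. include/openssl/configuration.h into configuration-64.h and then regenerates the original name as a wrapper (from scripts/multilib_header_wrapper.h) that selects the right variant at compile time. A sketch of the renaming step (the header name is illustrative):

```python
import os

def multilib_rename(incdir, header, ident):
    # "openssl/configuration.h" with ident "64" becomes
    # "openssl/configuration-64.h"; the class then writes a wrapper
    # under the original name.
    stem, ext = os.path.splitext(header)
    renamed = "%s-%s%s" % (stem, ident, ext)
    os.rename(os.path.join(incdir, header), os.path.join(incdir, renamed))
    return renamed
```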
| 48 | |||
| 49 | # Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic. | ||
| 50 | # We don't need multilib headers for native builds so brute force things. | ||
| 51 | oe_multilib_header:class-native () { | ||
| 52 | return | ||
| 53 | } | ||
| 54 | |||
| 55 | # Nor do we need multilib headers for nativesdk builds. | ||
| 56 | oe_multilib_header:class-nativesdk () { | ||
| 57 | return | ||
| 58 | } | ||
diff --git a/meta/classes-recipe/multilib_script.bbclass b/meta/classes-recipe/multilib_script.bbclass deleted file mode 100644 index a7a08930b7..0000000000 --- a/meta/classes-recipe/multilib_script.bbclass +++ /dev/null | |||
| @@ -1,39 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g. | ||
| 9 | # MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2" | ||
| 10 | # to indicate which script files to process from which packages. | ||
| 11 | # | ||
| 12 | |||
| 13 | inherit update-alternatives | ||
| 14 | |||
| 15 | MULTILIB_SUFFIX = "${@d.getVar('base_libdir').split('/')[-1]}" | ||
| 16 | |||
| 17 | PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename" | ||
| 18 | |||
| 19 | multilibscript_rename() { | ||
| 20 | : | ||
| 21 | } | ||
| 22 | |||
| 23 | python () { | ||
| 24 | # Do nothing if multilib isn't being used | ||
| 25 | if not d.getVar("MULTILIB_VARIANTS"): | ||
| 26 | return | ||
| 27 | # Do nothing for native/cross | ||
| 28 | if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d): | ||
| 29 | return | ||
| 30 | |||
| 31 | for entry in (d.getVar("MULTILIB_SCRIPTS") or "").split(): | ||
| 32 | pkg, script = entry.split(":", 1) | ||
| 33 | scriptname = os.path.basename(script) | ||
| 34 | d.appendVar("ALTERNATIVE:" + pkg, " " + scriptname + " ") | ||
| 35 | d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script) | ||
| 36 | d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}") | ||
| 37 | d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}") | ||
| 38 | d.appendVar("FILES:" + pkg, " " + script + "-${MULTILIB_SUFFIX}") | ||
| 39 | } | ||
diff --git a/meta/classes-recipe/native.bbclass b/meta/classes-recipe/native.bbclass deleted file mode 100644 index 7d1fe343fa..0000000000 --- a/meta/classes-recipe/native.bbclass +++ /dev/null | |||
| @@ -1,229 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # We want native packages to be relocatable | ||
| 8 | inherit relocatable | ||
| 9 | |||
| 10 | # Native packages are built indirectly via dependency, | ||
| 11 | # no need for them to be a direct target of 'world' | ||
| 12 | EXCLUDE_FROM_WORLD = "1" | ||
| 13 | |||
| 14 | PACKAGE_ARCH = "${BUILD_ARCH}" | ||
| 15 | |||
| 16 | # used by cmake class | ||
| 17 | OECMAKE_RPATH = "${libdir}" | ||
| 18 | OECMAKE_RPATH:class-native = "${libdir}" | ||
| 19 | |||
| 20 | TARGET_ARCH = "${BUILD_ARCH}" | ||
| 21 | TARGET_OS = "${BUILD_OS}" | ||
| 22 | TARGET_VENDOR = "${BUILD_VENDOR}" | ||
| 23 | TARGET_PREFIX = "${BUILD_PREFIX}" | ||
| 24 | TARGET_CC_ARCH = "${BUILD_CC_ARCH}" | ||
| 25 | TARGET_LD_ARCH = "${BUILD_LD_ARCH}" | ||
| 26 | TARGET_AS_ARCH = "${BUILD_AS_ARCH}" | ||
| 27 | TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}" | ||
| 28 | TARGET_CFLAGS = "${BUILD_CFLAGS}" | ||
| 29 | TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}" | ||
| 30 | TARGET_LDFLAGS = "${BUILD_LDFLAGS}" | ||
| 31 | TARGET_FPU = "" | ||
| 32 | TUNE_FEATURES = "" | ||
| 33 | ABIEXTENSION = "" | ||
| 34 | |||
| 35 | HOST_ARCH = "${BUILD_ARCH}" | ||
| 36 | HOST_OS = "${BUILD_OS}" | ||
| 37 | HOST_VENDOR = "${BUILD_VENDOR}" | ||
| 38 | HOST_PREFIX = "${BUILD_PREFIX}" | ||
| 39 | HOST_CC_ARCH = "${BUILD_CC_ARCH}" | ||
| 40 | HOST_LD_ARCH = "${BUILD_LD_ARCH}" | ||
| 41 | HOST_AS_ARCH = "${BUILD_AS_ARCH}" | ||
| 42 | |||
| 43 | STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}" | ||
| 44 | STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}" | ||
| 45 | |||
| 46 | # native pkg doesn't need the TOOLCHAIN_OPTIONS. | ||
| 47 | TOOLCHAIN_OPTIONS = "" | ||
| 48 | |||
| 49 | # Don't build ptest natively | ||
| 50 | PTEST_ENABLED = "0" | ||
| 51 | |||
| 52 | # Don't use site files for native builds | ||
| 53 | export CONFIG_SITE = "${COREBASE}/meta/site/native" | ||
| 54 | |||
| 55 | # set the compiler as well. It could have been set to something else | ||
| 56 | CC = "${BUILD_CC}" | ||
| 57 | CXX = "${BUILD_CXX}" | ||
| 58 | FC = "${BUILD_FC}" | ||
| 59 | CPP = "${BUILD_CPP}" | ||
| 60 | LD = "${BUILD_LD}" | ||
| 61 | CCLD = "${BUILD_CCLD}" | ||
| 62 | AR = "${BUILD_AR}" | ||
| 63 | AS = "${BUILD_AS}" | ||
| 64 | RANLIB = "${BUILD_RANLIB}" | ||
| 65 | STRIP = "${BUILD_STRIP}" | ||
| 66 | NM = "${BUILD_NM}" | ||
| 67 | OBJCOPY = "${BUILD_OBJCOPY}" | ||
| 68 | OBJDUMP = "${BUILD_OBJDUMP}" | ||
| 69 | READELF = "${BUILD_READELF}" | ||
| 70 | |||
| 71 | # Path prefixes | ||
| 72 | base_prefix = "${STAGING_DIR_NATIVE}" | ||
| 73 | prefix = "${STAGING_DIR_NATIVE}${prefix_native}" | ||
| 74 | exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}" | ||
| 75 | |||
| 76 | bindir = "${STAGING_BINDIR_NATIVE}" | ||
| 77 | sbindir = "${STAGING_SBINDIR_NATIVE}" | ||
| 78 | base_libdir = "${STAGING_BASE_LIBDIR_NATIVE}" | ||
| 79 | libdir = "${STAGING_LIBDIR_NATIVE}" | ||
| 80 | includedir = "${STAGING_INCDIR_NATIVE}" | ||
| 81 | sysconfdir = "${STAGING_ETCDIR_NATIVE}" | ||
| 82 | datadir = "${STAGING_DATADIR_NATIVE}" | ||
| 83 | |||
| 84 | baselib = "lib" | ||
| 85 | |||
| 86 | export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64" | ||
| 87 | |||
| 88 | NATIVE_PACKAGE_PATH_SUFFIX ?= "" | ||
| 89 | bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" | ||
| 90 | sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" | ||
| 91 | base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" | ||
| 92 | libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" | ||
| 93 | libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" | ||
| 94 | |||
| 95 | do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/" | ||
| 96 | do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}" | ||
| 97 | |||
| 98 | # Since we actually install these in situ there is no staging prefix | ||
| 99 | STAGING_DIR_HOST = "" | ||
| 100 | STAGING_DIR_TARGET = "" | ||
| 101 | PKG_CONFIG_DIR = "${libdir}/pkgconfig" | ||
| 102 | |||
| 103 | EXTRA_NATIVE_PKGCONFIG_PATH ?= "" | ||
| 104 | PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}" | ||
| 105 | PKG_CONFIG_SYSROOT_DIR = "" | ||
| 106 | PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1" | ||
| 107 | PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1" | ||
| 108 | |||
| 109 | # we don't want libc-*libc to kick in for native recipes | ||
| 110 | LIBCOVERRIDE = "" | ||
| 111 | CLASSOVERRIDE = "class-native" | ||
| 112 | MACHINEOVERRIDES = "" | ||
| 113 | MACHINE_FEATURES = "" | ||
| 114 | |||
| 115 | PATH:prepend = "${COREBASE}/scripts/native-intercept:" | ||
| 116 | |||
| 117 | # This class encodes staging paths into its scripts data so it can only be | ||
| 118 | # reused if we manipulate the paths. | ||
| 119 | SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}" | ||
| 120 | |||
| 121 | # No strip sysroot when DEBUG_BUILD is enabled | ||
| 122 | INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}" | ||
| 123 | |||
| 124 | python native_virtclass_handler () { | ||
| 125 | import re | ||
| 126 | pn = e.data.getVar("PN") | ||
| 127 | if not pn.endswith("-native"): | ||
| 128 | return | ||
| 129 | bpn = e.data.getVar("BPN") | ||
| 130 | |||
| 131 | # Set features here to prevent appends and distro features backfill | ||
| 132 | # from modifying native distro features | ||
| 133 | features = set(d.getVar("DISTRO_FEATURES_NATIVE").split()) | ||
| 134 | filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split()) | ||
| 135 | d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered))) | ||
| 136 | |||
| 137 | classextend = e.data.getVar('BBCLASSEXTEND') or "" | ||
| 138 | if "native" not in classextend: | ||
| 139 | return | ||
| 140 | |||
| 141 | def map_dependencies(varname, d, suffix, selfref=True, regex=False): | ||
| 142 | varname = varname + ":" + suffix | ||
| 143 | # Handle ${PN}-xxx -> ${BPN}-xxx-native | ||
| 144 | if suffix != "${PN}" and "${PN}" in suffix: | ||
| 145 | output_varname = varname.replace("${PN}", "${BPN}") + "-native" | ||
| 146 | d.renameVar(varname, output_varname) | ||
| 147 | |||
| 148 | d.setVarFilter("DEPENDS", "native_filter(val, '" + pn + "', '" + bpn + "', selfref=False)") | ||
| 149 | |||
| 150 | for varname in ["RDEPENDS", "RRECOMMENDS", "RSUGGESTS", "RPROVIDES", "RREPLACES"]: | ||
| 151 | d.setVarFilter(varname, "native_filter(val, '" + pn + "', '" + bpn + "')") | ||
| 152 | |||
| 153 | # We need to handle things like ${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)} | ||
| 154 | # and not pass ${PN}-ptest since in the native case it would be ignored. This does mean we ignore | ||
| 155 | # anonymous python derived PACKAGES entries. | ||
| 156 | for pkg in re.split(r"\${@(?:{.*?}|.)+?}|\s", d.getVar("PACKAGES", False)): | ||
| 157 | if not pkg: | ||
| 158 | continue | ||
| 159 | map_dependencies("RDEPENDS", e.data, pkg) | ||
| 160 | map_dependencies("RRECOMMENDS", e.data, pkg) | ||
| 161 | map_dependencies("RSUGGESTS", e.data, pkg) | ||
| 162 | map_dependencies("RPROVIDES", e.data, pkg) | ||
| 163 | map_dependencies("RREPLACES", e.data, pkg) | ||
| 164 | |||
| 165 | d.setVarFilter("PACKAGES", "native_filter(val, '" + pn + "', '" + bpn + "')") | ||
| 166 | d.setVarFilter("PACKAGES_DYNAMIC", "native_filter(val, '" + pn + "', '" + bpn + "', regex=True)") | ||
| 167 | |||
| 168 | provides = e.data.getVar("PROVIDES") | ||
| 169 | nprovides = [] | ||
| 170 | for prov in provides.split(): | ||
| 171 | if prov.find(pn) != -1: | ||
| 172 | nprovides.append(prov) | ||
| 173 | elif not prov.endswith("-native"): | ||
| 174 | nprovides.append(prov + "-native") | ||
| 175 | else: | ||
| 176 | nprovides.append(prov) | ||
| 177 | e.data.setVar("PROVIDES", ' '.join(nprovides)) | ||
| 178 | |||
| 179 | |||
| 180 | } | ||
| 181 | |||
| 182 | addhandler native_virtclass_handler | ||
| 183 | native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise" | ||
| 184 | |||
| 185 | python do_addto_recipe_sysroot () { | ||
| 186 | bb.build.exec_func("extend_recipe_sysroot", d) | ||
| 187 | } | ||
| 188 | addtask addto_recipe_sysroot after do_populate_sysroot | ||
| 189 | do_addto_recipe_sysroot[deptask] = "do_populate_sysroot" | ||
| 190 | |||
| 191 | inherit nopackages | ||
| 192 | |||
| 193 | do_packagedata[stamp-extra-info] = "" | ||
| 194 | |||
| 195 | USE_NLS = "no" | ||
| 196 | |||
| 197 | RECIPERDEPTASK = "do_populate_sysroot" | ||
| 198 | do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}" | ||
| 199 | |||
| 200 | # | ||
| 201 | # Native task outputs are directly run on the target (host) system after being | ||
| 202 | # built. Even if the output of this recipe doesn't change, a change in one of | ||
| 203 | # its dependencies may cause a change in the output it generates (e.g. rpm | ||
| 204 | # output depends on the output of its dependent zstd library). | ||
| 205 | # | ||
| 206 | # This can cause poor interactions with hash equivalence, since this recipe's | ||
| 207 | # output-changing dependency is "hidden" and downstream tasks only see that this | ||
| 208 | # recipe has the same outhash and is therefore equivalent. This can result in | ||
| 209 | # different output in different cases. | ||
| 210 | # | ||
| 211 | # To resolve this, unhide the output-changing dependency by adding its unihash | ||
| 212 | # to this task's outhash calculation. Unfortunately, we don't specifically | ||
| 213 | # know which dependencies are output-changing, so we have to add all of them. | ||
| 214 | # | ||
| 215 | python native_add_do_populate_sysroot_deps () { | ||
| 216 | current_task = "do_" + d.getVar("BB_CURRENTTASK") | ||
| 217 | if current_task != "do_populate_sysroot": | ||
| 218 | return | ||
| 219 | |||
| 220 | taskdepdata = d.getVar("BB_TASKDEPDATA", False) | ||
| 221 | pn = d.getVar("PN") | ||
| 222 | deps = { | ||
| 223 | dep[0]:dep[6] for dep in taskdepdata.values() if | ||
| 224 | dep[1] == current_task and dep[0] != pn | ||
| 225 | } | ||
| 226 | |||
| 227 | d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys()))) | ||
| 228 | } | ||
| 229 | SSTATECREATEFUNCS += "native_add_do_populate_sysroot_deps" | ||
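For clarity, HASHEQUIV_EXTRA_SIGDATA as set above is just a sorted "PN: unihash" listing of the do_populate_sysroot dependencies; it would look roughly like this (hashes invented for illustration):

    rpm-native: 4f6c1a...
    zstd-native: 9a1d37...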
diff --git a/meta/classes-recipe/nativesdk.bbclass b/meta/classes-recipe/nativesdk.bbclass deleted file mode 100644 index 9838d5a54b..0000000000 --- a/meta/classes-recipe/nativesdk.bbclass +++ /dev/null | |||
| @@ -1,123 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # SDK packages are built either explicitly by the user, | ||
| 8 | # or indirectly via dependency. No need to be in 'world'. | ||
| 9 | EXCLUDE_FROM_WORLD = "1" | ||
| 10 | |||
| 11 | STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}" | ||
| 12 | |||
| 13 | # libc for the SDK can be different to that of the target | ||
| 14 | NATIVESDKLIBC ?= "libc-glibc" | ||
| 15 | LIBCOVERRIDE = ":${NATIVESDKLIBC}" | ||
| 16 | CLASSOVERRIDE = "class-nativesdk" | ||
| 17 | MACHINEOVERRIDES = "" | ||
| 18 | |||
| 19 | MACHINE_FEATURES = "${SDK_MACHINE_FEATURES}" | ||
| 20 | DISTRO_FEATURES_BACKFILL = "" | ||
| 21 | MACHINE_FEATURES_BACKFILL = "" | ||
| 22 | |||
| 23 | MULTILIBS = "" | ||
| 24 | |||
| 25 | # we need consistent staging dir whether or not multilib is enabled | ||
| 26 | STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot" | ||
| 27 | STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot" | ||
| 28 | RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot" | ||
| 29 | |||
| 30 | # | ||
| 31 | # Update PACKAGE_ARCH and PACKAGE_ARCHS | ||
| 32 | # | ||
| 33 | PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}" | ||
| 34 | PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}" | ||
| 35 | TUNE_PKGARCH = "${SDK_ARCH}" | ||
| 36 | |||
| 37 | # | ||
| 38 | # We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit | ||
| 39 | # binaries | ||
| 40 | # | ||
| 41 | DEPENDS:append = " chrpath-replacement-native" | ||
| 42 | EXTRANATIVEPATH += "chrpath-native" | ||
| 43 | |||
| 44 | PKGDATA_DIR = "${PKGDATA_DIR_SDK}" | ||
| 45 | |||
| 46 | HOST_ARCH = "${SDK_ARCH}" | ||
| 47 | HOST_VENDOR = "${SDK_VENDOR}" | ||
| 48 | HOST_OS = "${SDK_OS}" | ||
| 49 | HOST_PREFIX = "${SDK_PREFIX}" | ||
| 50 | HOST_CC_ARCH = "${SDK_CC_ARCH}" | ||
| 51 | HOST_LD_ARCH = "${SDK_LD_ARCH}" | ||
| 52 | HOST_AS_ARCH = "${SDK_AS_ARCH}" | ||
| 53 | #HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}" | ||
| 54 | |||
| 55 | TARGET_ARCH = "${SDK_ARCH}" | ||
| 56 | TARGET_VENDOR = "${SDK_VENDOR}" | ||
| 57 | TARGET_OS = "${SDK_OS}" | ||
| 58 | TARGET_PREFIX = "${SDK_PREFIX}" | ||
| 59 | TARGET_CC_ARCH = "${SDK_CC_ARCH}" | ||
| 60 | TARGET_LD_ARCH = "${SDK_LD_ARCH}" | ||
| 61 | TARGET_AS_ARCH = "${SDK_AS_ARCH}" | ||
| 62 | TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}" | ||
| 63 | TARGET_CFLAGS = "${BUILDSDK_CFLAGS}" | ||
| 64 | TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}" | ||
| 65 | TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}" | ||
| 66 | TARGET_FPU = "" | ||
| 67 | EXTRA_OECONF_GCC_FLOAT = "" | ||
| 68 | TUNE_FEATURES = "" | ||
| 69 | |||
| 70 | # Change to place files in SDKPATH | ||
| 71 | base_prefix = "${SDKPATHNATIVE}" | ||
| 72 | prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" | ||
| 73 | exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" | ||
| 74 | baselib = "lib" | ||
| 75 | sbindir = "${bindir}" | ||
| 76 | |||
| 77 | export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig" | ||
| 78 | export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}" | ||
| 79 | |||
| 80 | python nativesdk_virtclass_handler () { | ||
| 81 | pn = e.data.getVar("PN") | ||
| 82 | if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")): | ||
| 83 | return | ||
| 84 | |||
| 85 | # Set features here to prevent appends and distro features backfill | ||
| 86 | # from modifying nativesdk distro features | ||
| 87 | features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split()) | ||
| 88 | filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split()) | ||
| 89 | d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered))) | ||
| 90 | |||
| 91 | e.data.setVar("MLPREFIX", "nativesdk-") | ||
| 92 | e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", "")) | ||
| 93 | } | ||
| 94 | |||
| 95 | python () { | ||
| 96 | pn = d.getVar("PN") | ||
| 97 | if not pn.startswith("nativesdk-"): | ||
| 98 | return | ||
| 99 | |||
| 100 | import oe.classextend | ||
| 101 | |||
| 102 | clsextend = oe.classextend.ClassExtender("nativesdk", [], d) | ||
| 103 | clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split()) | ||
| 104 | |||
| 105 | clsextend.set_filter("DEPENDS", deps=True) | ||
| 106 | clsextend.set_filter("PACKAGE_WRITE_DEPS", deps=False) | ||
| 107 | clsextend.map_packagevars() | ||
| 108 | clsextend.set_filter("PROVIDES", deps=False) | ||
| 109 | |||
| 110 | d.setVar("LIBCEXTENSION", "") | ||
| 111 | d.setVar("ABIEXTENSION", "") | ||
| 112 | } | ||
| 113 | |||
| 114 | addhandler nativesdk_virtclass_handler | ||
| 115 | nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise" | ||
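A minimal usage sketch (recipe name hypothetical): a recipe extended via BBCLASSEXTEND gets the renames shown in the handler above:

    # bar_1.0.bb
    BBCLASSEXTEND = "nativesdk"
    # The extended variant is finalised with MLPREFIX = "nativesdk-" and
    # PN = "nativesdk-bar", and is packaged for ${SDK_ARCH}-${SDKPKGSUFFIX}.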
| 116 | |||
| 117 | do_packagedata[stamp-extra-info] = "" | ||
| 118 | |||
| 119 | USE_NLS = "${SDKUSE_NLS}" | ||
| 120 | |||
| 121 | OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}" | ||
| 122 | |||
| 123 | PATH:prepend = "${COREBASE}/scripts/nativesdk-intercept:" | ||
diff --git a/meta/classes-recipe/nopackages.bbclass b/meta/classes-recipe/nopackages.bbclass deleted file mode 100644 index 9ea7273530..0000000000 --- a/meta/classes-recipe/nopackages.bbclass +++ /dev/null | |||
| @@ -1,19 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | deltask do_package | ||
| 8 | deltask do_package_write_rpm | ||
| 9 | deltask do_package_write_ipk | ||
| 10 | deltask do_package_write_deb | ||
| 11 | deltask do_package_write_tar | ||
| 12 | deltask do_package_qa | ||
| 13 | deltask do_packagedata | ||
| 14 | deltask do_package_setscene | ||
| 15 | deltask do_package_write_rpm_setscene | ||
| 16 | deltask do_package_write_ipk_setscene | ||
| 17 | deltask do_package_write_deb_setscene | ||
| 18 | deltask do_package_qa_setscene | ||
| 19 | deltask do_packagedata_setscene | ||
diff --git a/meta/classes-recipe/nospdx.bbclass b/meta/classes-recipe/nospdx.bbclass deleted file mode 100644 index b20e28218b..0000000000 --- a/meta/classes-recipe/nospdx.bbclass +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | deltask do_collect_spdx_deps | ||
| 8 | deltask do_create_spdx | ||
| 9 | deltask do_create_spdx_runtime | ||
| 10 | deltask do_create_package_spdx | ||
| 11 | deltask do_create_rootfs_spdx | ||
| 12 | deltask do_create_image_spdx | ||
| 13 | deltask do_create_image_sbom | ||
diff --git a/meta/classes-recipe/npm.bbclass b/meta/classes-recipe/npm.bbclass deleted file mode 100644 index 344e8b4bec..0000000000 --- a/meta/classes-recipe/npm.bbclass +++ /dev/null | |||
| @@ -1,357 +0,0 @@ | |||
| 1 | # Copyright (C) 2020 Savoir-Faire Linux | ||
| 2 | # | ||
| 3 | # SPDX-License-Identifier: GPL-2.0-only | ||
| 4 | # | ||
| 5 | # This bbclass builds and installs an npm package to the target. The package | ||
| 6 | # sources files should be fetched in the calling recipe by using the SRC_URI | ||
| 7 | # variable. The ${S} variable should be updated depending of your fetcher. | ||
| 8 | # | ||
| 9 | # Usage: | ||
| 10 | # SRC_URI = "..." | ||
| 11 | # inherit npm | ||
| 12 | # | ||
| 13 | # Optional variables: | ||
| 14 | # NPM_ARCH: | ||
| 15 | # Override the auto-generated npm architecture. | ||
| 16 | # | ||
| 17 | # NPM_INSTALL_DEV: | ||
| 18 | # Set to 1 to also install devDependencies. | ||
| 19 | |||
| 20 | inherit python3native | ||
| 21 | |||
| 22 | DEPENDS:prepend = "nodejs-native nodejs-oe-cache-native " | ||
| 23 | RDEPENDS:${PN}:append:class-target = " nodejs" | ||
| 24 | |||
| 25 | EXTRA_OENPM = "" | ||
| 26 | |||
| 27 | NPM_INSTALL_DEV ?= "0" | ||
| 28 | |||
| 29 | NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}" | ||
| 30 | |||
| 31 | ## must match mapping in nodejs.bb (openembedded-meta) | ||
| 32 | def map_nodejs_arch(a, d): | ||
| 33 | import re | ||
| 34 | |||
| 35 | if re.match('i.86$', a): return 'ia32' | ||
| 36 | elif re.match('x86_64$', a): return 'x64' | ||
| 37 | elif re.match('aarch64$', a): return 'arm64' | ||
| 38 | elif re.match('(powerpc64|powerpc64le|ppc64le)$', a): return 'ppc64' | ||
| 39 | elif re.match('powerpc$', a): return 'ppc' | ||
| 40 | return a | ||
| 41 | |||
| 42 | NPM_ARCH ?= "${@map_nodejs_arch(d.getVar("TARGET_ARCH"), d)}" | ||
| 43 | |||
| 44 | NPM_PACKAGE = "${WORKDIR}/npm-package" | ||
| 45 | NPM_CACHE = "${WORKDIR}/npm-cache" | ||
| 46 | NPM_BUILD = "${WORKDIR}/npm-build" | ||
| 47 | NPM_REGISTRY = "${WORKDIR}/npm-registry" | ||
| 48 | |||
| 49 | def npm_global_configs(d): | ||
| 50 | """Get the npm global configuration""" | ||
| 51 | configs = [] | ||
| 52 | # Ensure no network access is done | ||
| 53 | configs.append(("offline", "true")) | ||
| 54 | configs.append(("proxy", "http://invalid")) | ||
| 55 | configs.append(("fund", False)) | ||
| 56 | configs.append(("audit", False)) | ||
| 57 | # Configure the cache directory | ||
| 58 | configs.append(("cache", d.getVar("NPM_CACHE"))) | ||
| 59 | return configs | ||
| 60 | |||
| 61 | ## 'npm pack' runs the 'prepare' and 'prepack' scripts. Support for | ||
| 62 | ## 'ignore-scripts', which prevents this behavior, has been removed | ||
| 63 | ## from nodejs 16. Use a simple 'tar' instead. | ||
| 64 | def npm_pack(env, srcdir, workdir): | ||
| 65 | """Emulate 'npm pack' on a specified directory""" | ||
| 66 | import subprocess | ||
| 67 | import os | ||
| 68 | import json | ||
| 69 | |||
| 70 | src = os.path.join(srcdir, 'package.json') | ||
| 71 | with open(src) as f: | ||
| 72 | j = json.load(f) | ||
| 73 | |||
| 74 | # base does not really matter and is for documentation purposes | ||
| 75 | # only. But the 'version' part must exist because other parts of | ||
| 76 | # the bbclass rely on it. | ||
| 77 | if 'version' not in j: | ||
| 78 | j['version'] = '0.0.0-unknown' | ||
| 79 | base = j['name'].split('/')[-1] | ||
| 80 | tarball = os.path.join(workdir, "%s-%s.tgz" % (base, j['version'])) | ||
| 81 | |||
| 82 | # TODO: real 'npm pack' does not include directories while 'tar' | ||
| 83 | # does. But this does not seem to matter... | ||
| 84 | subprocess.run(['tar', 'czf', tarball, | ||
| 85 | '--exclude', './node_modules', | ||
| 86 | '--exclude-vcs', | ||
| 87 | '--transform', r's,^\./,package/,', | ||
| 88 | '--mtime', '1985-10-26T08:15:00.000Z', | ||
| 89 | '.'], | ||
| 90 | check = True, cwd = srcdir) | ||
| 91 | |||
| 92 | return (tarball, j) | ||
| 93 | |||
| 94 | python npm_do_configure() { | ||
| 95 | """ | ||
| 96 | Step one: configure the npm cache and the main npm package | ||
| 97 | |||
| 98 | All dependencies have been fetched and patched in the source directory. | ||
| 99 | They have to be packed (this removes unneeded files) and added to the npm | ||
| 100 | cache to be available for the next step. | ||
| 101 | |||
| 102 | The main package and its associated manifest file and shrinkwrap file have | ||
| 103 | to be configured to take into account these cached dependencies. | ||
| 104 | """ | ||
| 105 | import base64 | ||
| 106 | import copy | ||
| 107 | import json | ||
| 108 | import re | ||
| 109 | import shlex | ||
| 110 | import stat | ||
| 111 | import tempfile | ||
| 112 | from bb.fetch2.npm import NpmEnvironment | ||
| 113 | from bb.fetch2.npm import npm_unpack | ||
| 114 | from bb.fetch2.npm import npm_package | ||
| 115 | from bb.fetch2.npmsw import foreach_dependencies | ||
| 116 | from bb.progress import OutOfProgressHandler | ||
| 117 | from oe.npm_registry import NpmRegistry | ||
| 118 | |||
| 119 | bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True) | ||
| 120 | bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True) | ||
| 121 | |||
| 122 | env = NpmEnvironment(d, configs=npm_global_configs(d)) | ||
| 123 | registry = NpmRegistry(d.getVar('NPM_REGISTRY'), d.getVar('NPM_CACHE')) | ||
| 124 | |||
| 125 | def _npm_cache_add(tarball, pkg): | ||
| 126 | """Add tarball to local registry and register it in the | ||
| 127 | cache""" | ||
| 128 | registry.add_pkg(tarball, pkg) | ||
| 129 | |||
| 130 | def _npm_integrity(tarball): | ||
| 131 | """Return the npm integrity of a specified tarball""" | ||
| 132 | sha512 = bb.utils.sha512_file(tarball) | ||
| 133 | return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode() | ||
| 134 | |||
| 135 | # Manage the manifest file and shrinkwrap files | ||
| 136 | orig_manifest_file = d.expand("${S}/package.json") | ||
| 137 | orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json") | ||
| 138 | cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json") | ||
| 139 | cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json") | ||
| 140 | |||
| 141 | with open(orig_manifest_file, "r") as f: | ||
| 142 | orig_manifest = json.load(f) | ||
| 143 | |||
| 144 | cached_manifest = copy.deepcopy(orig_manifest) | ||
| 145 | cached_manifest.pop("dependencies", None) | ||
| 146 | cached_manifest.pop("devDependencies", None) | ||
| 147 | |||
| 148 | has_shrinkwrap_file = True | ||
| 149 | |||
| 150 | try: | ||
| 151 | with open(orig_shrinkwrap_file, "r") as f: | ||
| 152 | orig_shrinkwrap = json.load(f) | ||
| 153 | except IOError: | ||
| 154 | has_shrinkwrap_file = False | ||
| 155 | |||
| 156 | if has_shrinkwrap_file: | ||
| 157 | if int(orig_shrinkwrap.get("lockfileVersion", 0)) < 2: | ||
| 158 | bb.fatal("%s: lockfileVersion version 2 or later is required" % orig_shrinkwrap_file) | ||
| 159 | |||
| 160 | cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap) | ||
| 161 | for package in orig_shrinkwrap["packages"]: | ||
| 162 | if package != "": | ||
| 163 | cached_shrinkwrap["packages"].pop(package, None) | ||
| 164 | cached_shrinkwrap["packages"][""].pop("dependencies", None) | ||
| 165 | cached_shrinkwrap["packages"][""].pop("devDependencies", None) | ||
| 166 | cached_shrinkwrap["packages"][""].pop("peerDependencies", None) | ||
| 167 | |||
| 168 | # Manage the dependencies | ||
| 169 | progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$") | ||
| 170 | progress_total = 1 # also count the main package | ||
| 171 | progress_done = 0 | ||
| 172 | |||
| 173 | def _count_dependency(name, params, destsuffix): | ||
| 174 | nonlocal progress_total | ||
| 175 | progress_total += 1 | ||
| 176 | |||
| 177 | def _cache_dependency(name, params, destsuffix): | ||
| 178 | with tempfile.TemporaryDirectory() as tmpdir: | ||
| 179 | # Add the dependency to the npm cache | ||
| 180 | destdir = os.path.join(d.getVar("S"), destsuffix) | ||
| 181 | (tarball, pkg) = npm_pack(env, destdir, tmpdir) | ||
| 182 | _npm_cache_add(tarball, pkg) | ||
| 183 | # Add its signature to the cached shrinkwrap | ||
| 184 | dep = params | ||
| 185 | dep["version"] = pkg['version'] | ||
| 186 | dep["integrity"] = _npm_integrity(tarball) | ||
| 187 | if params.get("dev", False): | ||
| 188 | dep["dev"] = True | ||
| 189 | if "devDependencies" not in cached_shrinkwrap["packages"][""]: | ||
| 190 | cached_shrinkwrap["packages"][""]["devDependencies"] = {} | ||
| 191 | cached_shrinkwrap["packages"][""]["devDependencies"][name] = pkg['version'] | ||
| 192 | |||
| 193 | else: | ||
| 194 | if "dependencies" not in cached_shrinkwrap["packages"][""]: | ||
| 195 | cached_shrinkwrap["packages"][""]["dependencies"] = {} | ||
| 196 | cached_shrinkwrap["packages"][""]["dependencies"][name] = pkg['version'] | ||
| 197 | |||
| 198 | cached_shrinkwrap["packages"][destsuffix] = dep | ||
| 199 | # Display progress | ||
| 200 | nonlocal progress_done | ||
| 201 | progress_done += 1 | ||
| 202 | progress.write("%d/%d" % (progress_done, progress_total)) | ||
| 203 | |||
| 204 | dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False) | ||
| 205 | |||
| 206 | if has_shrinkwrap_file: | ||
| 207 | foreach_dependencies(orig_shrinkwrap, _count_dependency, dev) | ||
| 208 | foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev) | ||
| 209 | |||
| 210 | # Manage Peer Dependencies | ||
| 211 | if has_shrinkwrap_file: | ||
| 212 | packages = orig_shrinkwrap.get("packages", {}) | ||
| 213 | peer_deps = packages.get("", {}).get("peerDependencies", {}) | ||
| 214 | package_runtime_dependencies = d.getVar("RDEPENDS:%s" % d.getVar("PN")) | ||
| 215 | |||
| 216 | for peer_dep in peer_deps: | ||
| 217 | peer_dep_yocto_name = npm_package(peer_dep) | ||
| 218 | if peer_dep_yocto_name not in package_runtime_dependencies: | ||
| 219 | bb.warn(peer_dep + " is a peer dependency that is not in the RDEPENDS variable. " + | ||
| 220 | "Please add this peer dependency to the RDEPENDS variable as %s and generate its recipe with devtool" | ||
| 221 | % peer_dep_yocto_name) | ||
| 222 | |||
| 223 | # Configure the main package | ||
| 224 | with tempfile.TemporaryDirectory() as tmpdir: | ||
| 225 | (tarball, _) = npm_pack(env, d.getVar("S"), tmpdir) | ||
| 226 | npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d) | ||
| 227 | |||
| 228 | # Configure the cached manifest file and cached shrinkwrap file | ||
| 229 | def _update_manifest(depkey): | ||
| 230 | for name in orig_manifest.get(depkey, {}): | ||
| 231 | version = cached_shrinkwrap["packages"][""][depkey][name] | ||
| 232 | if depkey not in cached_manifest: | ||
| 233 | cached_manifest[depkey] = {} | ||
| 234 | cached_manifest[depkey][name] = version | ||
| 235 | |||
| 236 | if has_shrinkwrap_file: | ||
| 237 | _update_manifest("dependencies") | ||
| 238 | |||
| 239 | if dev: | ||
| 240 | if has_shrinkwrap_file: | ||
| 241 | _update_manifest("devDependencies") | ||
| 242 | |||
| 243 | os.chmod(cached_manifest_file, os.stat(cached_manifest_file).st_mode | stat.S_IWUSR) | ||
| 244 | with open(cached_manifest_file, "w") as f: | ||
| 245 | json.dump(cached_manifest, f, indent=2) | ||
| 246 | |||
| 247 | if has_shrinkwrap_file: | ||
| 248 | with open(cached_shrinkwrap_file, "w") as f: | ||
| 249 | json.dump(cached_shrinkwrap, f, indent=2) | ||
| 250 | } | ||
| 251 | |||
| 252 | python npm_do_compile() { | ||
| 253 | """ | ||
| 254 | Step two: install the npm package | ||
| 255 | |||
| 256 | Use the configured main package and the cached dependencies to run the | ||
| 257 | installation process. The installation is done in a directory which is | ||
| 258 | not the destination directory yet. | ||
| 259 | |||
| 260 | A combination of 'npm pack' and 'npm install' is used to ensure that the | ||
| 261 | installed files are actual copies instead of symbolic links (which is the | ||
| 262 | default npm behavior). | ||
| 263 | """ | ||
| 264 | import shlex | ||
| 265 | import tempfile | ||
| 266 | from bb.fetch2.npm import NpmEnvironment | ||
| 267 | |||
| 268 | bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True) | ||
| 269 | |||
| 270 | with tempfile.TemporaryDirectory() as tmpdir: | ||
| 271 | args = [] | ||
| 272 | configs = npm_global_configs(d) | ||
| 273 | |||
| 274 | if bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False): | ||
| 275 | configs.append(("also", "development")) | ||
| 276 | else: | ||
| 277 | configs.append(("only", "production")) | ||
| 278 | |||
| 280 | # Report as many logs as possible for debugging purposes | ||
| 280 | configs.append(("loglevel", "silly")) | ||
| 281 | |||
| 282 | # Configure the installation to be done globally in the build directory | ||
| 283 | configs.append(("global", "true")) | ||
| 284 | configs.append(("prefix", d.getVar("NPM_BUILD"))) | ||
| 285 | |||
| 286 | # Add node-gyp configuration | ||
| 287 | configs.append(("arch", d.getVar("NPM_ARCH"))) | ||
| 288 | configs.append(("release", "true")) | ||
| 289 | configs.append(("nodedir", d.getVar("NPM_NODEDIR"))) | ||
| 290 | configs.append(("python", d.getVar("PYTHON"))) | ||
| 291 | |||
| 292 | env = NpmEnvironment(d, configs) | ||
| 293 | |||
| 294 | # Add node-pre-gyp configuration | ||
| 295 | args.append(("target_arch", d.getVar("NPM_ARCH"))) | ||
| 296 | args.append(("build-from-source", "true")) | ||
| 297 | |||
| 299 | # Don't install peer dependencies as they should be in the RDEPENDS variable | ||
| 299 | args.append(("legacy-peer-deps", "true")) | ||
| 300 | |||
| 301 | # Pack and install the main package | ||
| 302 | (tarball, _) = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir) | ||
| 303 | cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM")) | ||
| 304 | env.run(cmd, args=args) | ||
| 305 | } | ||
| 306 | |||
| 307 | npm_do_install() { | ||
| 308 | # Step three: final install | ||
| 309 | # | ||
| 310 | # The previous installation has to be filtered to remove some extra files. | ||
| 311 | |||
| 312 | rm -rf ${D} | ||
| 313 | |||
| 314 | # Copy the entire lib and bin directories | ||
| 315 | install -d ${D}/${nonarch_libdir} | ||
| 316 | cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir} | ||
| 317 | |||
| 318 | if [ -d "${NPM_BUILD}/bin" ] | ||
| 319 | then | ||
| 320 | install -d ${D}/${bindir} | ||
| 321 | cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir} | ||
| 322 | fi | ||
| 323 | |||
| 324 | # If the package (or its dependencies) uses node-gyp to build native addons, | ||
| 325 | # object files, static libraries or other temporary files can be hidden in | ||
| 326 | # the lib directory. To reduce the package size and to avoid QA issues | ||
| 327 | # (staticdev with static library files) these files must be removed. | ||
| 328 | local GYP_REGEX=".*/build/Release/[^/]*.node" | ||
| 329 | |||
| 330 | # Remove any node-gyp directory in ${D} to remove temporary build files | ||
| 331 | for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}") | ||
| 332 | do | ||
| 333 | local GYP_D_DIR=${GYP_D_FILE%/Release/*} | ||
| 334 | |||
| 335 | rm --recursive --force ${GYP_D_DIR} | ||
| 336 | done | ||
| 337 | |||
| 338 | # Copy only the node-gyp release files | ||
| 339 | for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}") | ||
| 340 | do | ||
| 341 | local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}} | ||
| 342 | |||
| 343 | install -d ${GYP_D_FILE%/*} | ||
| 344 | install -m 755 ${GYP_B_FILE} ${GYP_D_FILE} | ||
| 345 | done | ||
| 346 | |||
| 347 | # Remove the shrinkwrap file which does not need to be packed | ||
| 348 | rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json | ||
| 349 | rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json | ||
| 350 | } | ||
| 351 | |||
| 352 | FILES:${PN} += " \ | ||
| 353 | ${bindir} \ | ||
| 354 | ${nonarch_libdir} \ | ||
| 355 | " | ||
| 356 | |||
| 357 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
diff --git a/meta/classes-recipe/overlayfs-etc.bbclass b/meta/classes-recipe/overlayfs-etc.bbclass deleted file mode 100644 index d339fbbeee..0000000000 --- a/meta/classes-recipe/overlayfs-etc.bbclass +++ /dev/null | |||
| @@ -1,88 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Class for setting up /etc in overlayfs | ||
| 8 | # | ||
| 9 | # In order to have the /etc directory in overlayfs, special handling at an early boot stage is required. | ||
| 10 | # The idea is to supply a custom init script that mounts /etc before launching the actual init program, | ||
| 11 | # because the latter already requires /etc to be mounted | ||
| 12 | # | ||
| 13 | # The configuration must be machine specific. You should at least set these three variables: | ||
| 14 | # OVERLAYFS_ETC_MOUNT_POINT ?= "/data" | ||
| 15 | # OVERLAYFS_ETC_FSTYPE ?= "ext4" | ||
| 16 | # OVERLAYFS_ETC_DEVICE ?= "/dev/mmcblk0p2" | ||
| 17 | # | ||
| 18 | # To control the mount options further, you can set: | ||
| 19 | # OVERLAYFS_ETC_MOUNT_OPTIONS ?= "defaults" | ||
| 20 | # | ||
| 21 | # The class provides two options for /sbin/init generation | ||
| 22 | # 1. The default option is to rename the original /sbin/init to /sbin/init.orig and place the generated init under | ||
| 23 | # the original name, i.e. /sbin/init. The advantage is that you won't need to change any kernel | ||
| 24 | # parameters to make it work, but it poses the restriction that package-management can't | ||
| 25 | # be used, because updating the init manager would remove the generated script | ||
| 26 | # 2. If you would like to keep the original init as is, you can set | ||
| 27 | # OVERLAYFS_ETC_USE_ORIG_INIT_NAME = "0" | ||
| 28 | # Then the generated init will be named /sbin/preinit and you will need to extend your kernel parameters | ||
| 29 | # manually in your bootloader configuration. | ||
| 30 | # | ||
| 31 | # Regardless of which mode you choose, the update and migration strategy for configuration files under the /etc | ||
| 32 | # overlay is out of scope for this class | ||
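Putting it together, a configuration sketch based on the example values above (the device and mount point must match your machine):

    # machine configuration
    OVERLAYFS_ETC_MOUNT_POINT = "/data"
    OVERLAYFS_ETC_FSTYPE = "ext4"
    OVERLAYFS_ETC_DEVICE = "/dev/mmcblk0p2"

    # image recipe or local.conf
    IMAGE_FEATURES += "overlayfs-etc"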
| 33 | |||
| 34 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "overlayfs-etc", "create_overlayfs_etc_preinit", "", d)}' | ||
| 35 | IMAGE_FEATURES_CONFLICTS_overlayfs-etc = "${@ 'package-management' if bb.utils.to_boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'), True) else ''}" | ||
| 36 | |||
| 37 | OVERLAYFS_ETC_MOUNT_POINT ??= "" | ||
| 38 | OVERLAYFS_ETC_FSTYPE ??= "" | ||
| 39 | OVERLAYFS_ETC_DEVICE ??= "" | ||
| 40 | OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1" | ||
| 41 | OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults" | ||
| 42 | OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in" | ||
| 43 | OVERLAYFS_ETC_EXPOSE_LOWER ??= "0" | ||
| 44 | OVERLAYFS_ETC_CREATE_MOUNT_DIRS ??= "1" | ||
| 45 | |||
| 46 | python create_overlayfs_etc_preinit() { | ||
| 47 | overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT") | ||
| 48 | overlayEtcFsType = d.getVar("OVERLAYFS_ETC_FSTYPE") | ||
| 49 | overlayEtcDevice = d.getVar("OVERLAYFS_ETC_DEVICE") | ||
| 50 | |||
| 51 | if not overlayEtcMountPoint: | ||
| 52 | bb.fatal("OVERLAYFS_ETC_MOUNT_POINT must be set in your MACHINE configuration") | ||
| 53 | if not overlayEtcDevice: | ||
| 54 | bb.fatal("OVERLAYFS_ETC_DEVICE must be set in your MACHINE configuration") | ||
| 55 | if not overlayEtcFsType: | ||
| 56 | bb.fatal("OVERLAYFS_ETC_FSTYPE should contain a valid file system type on {0}".format(overlayEtcDevice)) | ||
| 57 | |||
| 58 | with open(d.getVar("OVERLAYFS_ETC_INIT_TEMPLATE"), "r") as f: | ||
| 59 | PreinitTemplate = f.read() | ||
| 60 | |||
| 61 | useOrigInit = oe.types.boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME')) | ||
| 62 | preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit") | ||
| 63 | initBaseName = oe.path.join(d.getVar("base_sbindir"), "init") | ||
| 64 | origInitNameSuffix = ".orig" | ||
| 65 | exposeLower = oe.types.boolean(d.getVar('OVERLAYFS_ETC_EXPOSE_LOWER')) | ||
| 66 | createMountDirs = oe.types.boolean(d.getVar('OVERLAYFS_ETC_CREATE_MOUNT_DIRS')) | ||
| 67 | |||
| 68 | args = { | ||
| 69 | 'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint, | ||
| 70 | 'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'), | ||
| 71 | 'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType, | ||
| 72 | 'OVERLAYFS_ETC_DEVICE': overlayEtcDevice, | ||
| 73 | 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName, | ||
| 74 | 'OVERLAYFS_ETC_EXPOSE_LOWER': "true" if exposeLower else "false", | ||
| 75 | 'CREATE_MOUNT_DIRS': "true" if createMountDirs else "false" | ||
| 76 | } | ||
| 77 | |||
| 78 | if useOrigInit: | ||
| 79 | # rename original /sbin/init | ||
| 80 | origInit = oe.path.join(d.getVar("IMAGE_ROOTFS"), initBaseName) | ||
| 81 | bb.debug(1, "rootfs path %s, init path %s, test %s" % (d.getVar('IMAGE_ROOTFS'), origInit, d.getVar("IMAGE_ROOTFS"))) | ||
| 82 | bb.utils.rename(origInit, origInit + origInitNameSuffix) | ||
| 83 | preinitPath = origInit | ||
| 84 | |||
| 85 | with open(preinitPath, 'w') as f: | ||
| 86 | f.write(PreinitTemplate.format(**args)) | ||
| 87 | os.chmod(preinitPath, 0o755) | ||
| 88 | } | ||
diff --git a/meta/classes-recipe/overlayfs.bbclass b/meta/classes-recipe/overlayfs.bbclass deleted file mode 100644 index a82763ec10..0000000000 --- a/meta/classes-recipe/overlayfs.bbclass +++ /dev/null | |||
| @@ -1,142 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Class for generation of overlayfs mount units | ||
| 8 | # | ||
| 9 | # It's often desired in embedded system design to have a read-only rootfs. | ||
| 10 | # But a lot of different applications might want read-write access to | ||
| 11 | # some parts of a filesystem. It can be especially useful when your update mechanism | ||
| 12 | # overwrites the whole rootfs, but you want your application data to be preserved | ||
| 13 | # between updates. This class provides a way to achieve that by means | ||
| 14 | # of overlayfs and at the same time keeping the base rootfs read-only. | ||
| 15 | # | ||
| 16 | # Usage example. | ||
| 17 | # | ||
| 18 | # Set a mount point for a partition overlayfs is going to use as upper layer | ||
| 19 | # in your machine configuration. Underlying file system can be anything that | ||
| 20 | # is supported by overlayfs. This has to be done in your machine configuration. | ||
| 21 | # The QA check fails to catch file existence if you redefine this variable in your recipe! | ||
| 22 | # | ||
| 23 | # OVERLAYFS_MOUNT_POINT[data] ?= "/data" | ||
| 24 | # | ||
| 25 | # By default the class assumes you have a corresponding fstab entry or systemd | ||
| 26 | # mount unit (data.mount in this case) for this mount point installed on the | ||
| 27 | # image, for instance via a wks script or the systemd-machine-units recipe. | ||
| 28 | # | ||
| 29 | # If the mount point is handled somewhere else, e.g. custom boot or preinit | ||
| 30 | # scripts or in a initramfs, then this QA check can be skipped by adding | ||
| 31 | # mount-configured to the related OVERLAYFS_QA_SKIP flag: | ||
| 32 | # | ||
| 33 | # OVERLAYFS_QA_SKIP[data] = "mount-configured" | ||
| 34 | # | ||
| 35 | # To use the overlayfs, you just have to specify writable directories inside | ||
| 36 | # their recipe: | ||
| 37 | # | ||
| 38 | # OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application" | ||
| 39 | # | ||
| 40 | # To support several mount points you can use a different variable flag. Assume we | ||
| 41 | # want to have a writable location on the file system, but are not interested in whether the data | ||
| 42 | # survives a reboot. Then we could have a mnt-overlay.mount unit for a tmpfs file system: | ||
| 43 | # | ||
| 44 | # OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay" | ||
| 45 | # OVERLAYFS_WRITABLE_PATHS[mnt-overlay] = "/usr/share/another-application" | ||
| 46 | # | ||
| 47 | # If your recipe deploys a systemd service, then it should require and be | ||
| 48 | # started after the ${PN}-overlays.service to make sure that all overlays are | ||
| 49 | # mounted beforehand. | ||
| 50 | # | ||
| 51 | # Note: the class does not support the /etc directory itself, because systemd depends on it. | ||
| 52 | # For the /etc directory use the overlayfs-etc class | ||
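As a sketch of the ordering requirement above (unit and recipe names hypothetical), a service installed by the same recipe could declare:

    # my-application.service fragment
    [Unit]
    Requires=my-recipe-overlays.service
    After=my-recipe-overlays.service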
| 53 | |||
| 54 | REQUIRED_DISTRO_FEATURES += "systemd overlayfs" | ||
| 55 | |||
| 56 | inherit systemd features_check | ||
| 57 | |||
| 58 | OVERLAYFS_CREATE_DIRS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-create-dirs.service.in" | ||
| 59 | OVERLAYFS_MOUNT_UNIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-unit.mount.in" | ||
| 60 | OVERLAYFS_ALL_OVERLAYS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-all-overlays.service.in" | ||
| 61 | |||
| 62 | python do_create_overlayfs_units() { | ||
| 63 | from oe.overlayfs import mountUnitName | ||
| 64 | |||
| 65 | with open(d.getVar("OVERLAYFS_CREATE_DIRS_TEMPLATE"), "r") as f: | ||
| 66 | CreateDirsUnitTemplate = f.read() | ||
| 67 | with open(d.getVar("OVERLAYFS_MOUNT_UNIT_TEMPLATE"), "r") as f: | ||
| 68 | MountUnitTemplate = f.read() | ||
| 69 | with open(d.getVar("OVERLAYFS_ALL_OVERLAYS_TEMPLATE"), "r") as f: | ||
| 70 | AllOverlaysTemplate = f.read() | ||
| 71 | |||
| 72 | def prepareUnits(data, lower): | ||
| 73 | from oe.overlayfs import helperUnitName | ||
| 74 | |||
| 75 | args = { | ||
| 76 | 'DATA_MOUNT_POINT': data, | ||
| 77 | 'DATA_MOUNT_UNIT': mountUnitName(data), | ||
| 78 | 'CREATE_DIRS_SERVICE': helperUnitName(lower), | ||
| 79 | 'LOWERDIR': lower, | ||
| 80 | } | ||
| 81 | |||
| 82 | bb.debug(1, "Generate systemd unit %s" % mountUnitName(lower)) | ||
| 83 | with open(os.path.join(d.getVar('WORKDIR'), mountUnitName(lower)), 'w') as f: | ||
| 84 | f.write(MountUnitTemplate.format(**args)) | ||
| 85 | |||
| 86 | bb.debug(1, "Generate helper systemd unit %s" % helperUnitName(lower)) | ||
| 87 | with open(os.path.join(d.getVar('WORKDIR'), helperUnitName(lower)), 'w') as f: | ||
| 88 | f.write(CreateDirsUnitTemplate.format(**args)) | ||
| 89 | |||
| 90 | def prepareGlobalUnit(dependentUnits): | ||
| 91 | from oe.overlayfs import allOverlaysUnitName | ||
| 92 | args = { | ||
| 93 | 'ALL_OVERLAYFS_UNITS': " ".join(dependentUnits), | ||
| 94 | 'PN': d.getVar('PN') | ||
| 95 | } | ||
| 96 | |||
| 97 | bb.debug(1, "Generate systemd unit with all overlays %s" % allOverlaysUnitName(d)) | ||
| 98 | with open(os.path.join(d.getVar('WORKDIR'), allOverlaysUnitName(d)), 'w') as f: | ||
| 99 | f.write(AllOverlaysTemplate.format(**args)) | ||
| 100 | |||
| 101 | mountUnitList = [] | ||
| 102 | overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") | ||
| 103 | for mountPoint in overlayMountPoints: | ||
| 104 | bb.debug(1, "Process variable flag %s" % mountPoint) | ||
| 105 | lowerList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint) | ||
| 106 | if not lowerList: | ||
| 107 | bb.note("No mount points defined for %s flag, skipping" % (mountPoint)) | ||
| 108 | continue | ||
| 109 | for lower in lowerList.split(): | ||
| 110 | bb.debug(1, "Prepare mount unit for %s with data mount point %s" % | ||
| 111 | (lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint))) | ||
| 112 | prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower) | ||
| 113 | mountUnitList.append(mountUnitName(lower)) | ||
| 114 | |||
| 115 | # set up one unit, which depends on all mount units, so users can set | ||
| 116 | # only one dependency in their units to make sure software starts | ||
| 117 | # when all overlays are mounted | ||
| 118 | prepareGlobalUnit(mountUnitList) | ||
| 119 | } | ||
| 120 | |||
| 121 | # we need to generate file names early during parsing stage | ||
| 122 | python () { | ||
| 123 | from oe.overlayfs import strForBash, unitFileList | ||
| 124 | |||
| 125 | unitList = unitFileList(d) | ||
| 126 | for unit in unitList: | ||
| 127 | d.appendVar('SYSTEMD_SERVICE:' + d.getVar('PN'), ' ' + unit) | ||
| 128 | d.appendVar('FILES:' + d.getVar('PN'), ' ' + | ||
| 129 | d.getVar('systemd_system_unitdir') + '/' + strForBash(unit)) | ||
| 130 | |||
| 131 | d.setVar('OVERLAYFS_UNIT_LIST', ' '.join([strForBash(s) for s in unitList])) | ||
| 132 | } | ||
| 133 | |||
| 134 | do_install:append() { | ||
| 135 | install -d ${D}${systemd_system_unitdir} | ||
| 136 | for unit in ${OVERLAYFS_UNIT_LIST}; do | ||
| 137 | install -m 0444 ${WORKDIR}/${unit} ${D}${systemd_system_unitdir} | ||
| 138 | done | ||
| 139 | } | ||
| 140 | |||
| 141 | do_create_overlayfs_units[vardeps] += "OVERLAYFS_WRITABLE_PATHS" | ||
| 142 | addtask create_overlayfs_units before do_install | ||
diff --git a/meta/classes-recipe/packagegroup.bbclass b/meta/classes-recipe/packagegroup.bbclass deleted file mode 100644 index cf6fc354a8..0000000000 --- a/meta/classes-recipe/packagegroup.bbclass +++ /dev/null | |||
| @@ -1,70 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Class for packagegroup (package group) recipes | ||
| 8 | |||
| 9 | # By default, only the packagegroup package itself is in PACKAGES. | ||
| 10 | # -dbg and -dev flavours are handled by the anonfunc below. | ||
| 11 | # This means that packagegroup recipes used to build multiple packagegroup | ||
| 12 | # packages have to modify PACKAGES after inheriting packagegroup.bbclass. | ||
| 13 | PACKAGES = "${PN}" | ||
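A minimal packagegroup recipe sketch (names hypothetical) showing the default single-package form:

    # packagegroup-example.bb
    SUMMARY = "Example package group"
    inherit packagegroup
    RDEPENDS:${PN} = "pkg-a pkg-b"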
| 14 | |||
| 15 | # By default, packagegroup packages do not depend on a certain architecture. | ||
| 16 | # Only if dependencies are modified by MACHINE_FEATURES does PACKAGE_ARCH | ||
| 17 | # need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass | ||
| 18 | PACKAGE_ARCH ?= "all" | ||
| 19 | |||
| 20 | # Fully expanded - so it applies the overrides as well | ||
| 21 | PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}" | ||
| 22 | |||
| 23 | LICENSE ?= "MIT" | ||
| 24 | |||
| 25 | inherit_defer ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')} | ||
| 26 | |||
| 27 | # This automatically adds -dbg and -dev flavours of all PACKAGES | ||
| 28 | # to the list. Their dependencies (RRECOMMENDS) are handled as usual | ||
| 29 | # by package_depchains in a following step. | ||
| 30 | # Also mark all packages as ALLOW_EMPTY | ||
| 31 | python () { | ||
| 32 | packages = d.getVar('PACKAGES').split() | ||
| 33 | if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1': | ||
| 34 | types = ['', '-dbg', '-dev'] | ||
| 35 | if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d): | ||
| 36 | types.append('-ptest') | ||
| 37 | packages = [pkg + suffix for pkg in packages | ||
| 38 | for suffix in types] | ||
| 39 | d.setVar('PACKAGES', ' '.join(packages)) | ||
| 40 | for pkg in packages: | ||
| 41 | d.setVar('ALLOW_EMPTY:%s' % pkg, '1') | ||
| 42 | } | ||
| 43 | |||
| 44 | # We don't want to look at shared library dependencies for the | ||
| 45 | # dbg packages | ||
| 46 | DEPCHAIN_DBGDEFAULTDEPS = "1" | ||
| 47 | |||
| 48 | # We only need the packaging tasks - disable the rest | ||
| 49 | deltask do_fetch | ||
| 50 | deltask do_unpack | ||
| 51 | deltask do_patch | ||
| 52 | deltask do_configure | ||
| 53 | deltask do_compile | ||
| 54 | deltask do_install | ||
| 55 | deltask do_populate_sysroot | ||
| 56 | |||
| 57 | do_create_runtime_spdx[deptask] = "do_create_spdx" | ||
| 58 | do_create_runtime_spdx[rdeptask] = "" | ||
| 59 | |||
| 60 | INHIBIT_DEFAULT_DEPS = "1" | ||
| 61 | |||
| 62 | python () { | ||
| 63 | if bb.data.inherits_class('nativesdk', d): | ||
| 64 | return | ||
| 65 | initman = d.getVar("VIRTUAL-RUNTIME_init_manager") | ||
| 66 | if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d): | ||
| 67 | bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman) | ||
| 68 | } | ||
| 69 | |||
| 70 | CVE_PRODUCT = "" | ||
diff --git a/meta/classes-recipe/perl-version.bbclass b/meta/classes-recipe/perl-version.bbclass deleted file mode 100644 index 74e33175d9..0000000000 --- a/meta/classes-recipe/perl-version.bbclass +++ /dev/null | |||
| @@ -1,66 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | PERL_OWN_DIR = "" | ||
| 8 | |||
| 9 | # Determine the staged version of perl from the perl configuration file | ||
| 10 | # Assign vardepvalue, because otherwise the signature changes before and after | ||
| 11 | # perl is built (from None to the real version in config.sh). | ||
| 12 | get_perl_version[vardepvalue] = "${PERL_OWN_DIR}" | ||
| 13 | def get_perl_version(d): | ||
| 14 | import re | ||
| 15 | cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh') | ||
| 16 | try: | ||
| 17 | f = open(cfg, 'r') | ||
| 18 | except IOError: | ||
| 19 | return None | ||
| 20 | l = f.readlines() | ||
| 21 | f.close() | ||
| 22 | r = re.compile(r"^version='(\d*\.\d*\.\d*)'") | ||
| 23 | for s in l: | ||
| 24 | m = r.match(s) | ||
| 25 | if m: | ||
| 26 | return m.group(1) | ||
| 27 | return None | ||
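For reference, the config.sh lines matched by these parsers look roughly like the following (values are examples):

    version='5.38.2'
    archname='x86_64-linux'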
| 28 | |||
| 29 | |||
| 30 | # Determine the staged arch of perl from the perl configuration file | ||
| 31 | # Assign vardepvalue, because otherwise the signature changes before and after | ||
| 32 | # perl is built (from None to the real value in config.sh). | ||
| 33 | def get_perl_arch(d): | ||
| 34 | import re | ||
| 35 | cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh') | ||
| 36 | try: | ||
| 37 | f = open(cfg, 'r') | ||
| 38 | except IOError: | ||
| 39 | return None | ||
| 40 | l = f.readlines() | ||
| 41 | f.close() | ||
| 42 | r = re.compile("^archname='([^']*)'") | ||
| 43 | for s in l: | ||
| 44 | m = r.match(s) | ||
| 45 | if m: | ||
| 46 | return m.group(1) | ||
| 47 | return None | ||
| 48 | |||
| 49 | # Determine the staged arch of perl-native from the perl configuration file | ||
| 50 | # Assign vardepvalue, because otherwise the signature changes before and after | ||
| 51 | # perl is built (from None to the real value in config.sh). | ||
| 52 | def get_perl_hostarch(d): | ||
| 53 | import re | ||
| 54 | cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh') | ||
| 55 | try: | ||
| 56 | f = open(cfg, 'r') | ||
| 57 | except IOError: | ||
| 58 | return None | ||
| 59 | l = f.readlines() | ||
| 60 | f.close() | ||
| 61 | r = re.compile("^archname='([^']*)'") | ||
| 62 | for s in l: | ||
| 63 | m = r.match(s) | ||
| 64 | if m: | ||
| 65 | return m.group(1) | ||
| 66 | return None | ||
diff --git a/meta/classes-recipe/perlnative.bbclass b/meta/classes-recipe/perlnative.bbclass deleted file mode 100644 index d56ec4ae72..0000000000 --- a/meta/classes-recipe/perlnative.bbclass +++ /dev/null | |||
| @@ -1,9 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | EXTRANATIVEPATH += "perl-native" | ||
| 8 | DEPENDS += "perl-native" | ||
| 9 | OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native" | ||
diff --git a/meta/classes-recipe/pixbufcache.bbclass b/meta/classes-recipe/pixbufcache.bbclass deleted file mode 100644 index c32673df55..0000000000 --- a/meta/classes-recipe/pixbufcache.bbclass +++ /dev/null | |||
| @@ -1,66 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This class will generate the proper postinst/postrm scriptlets for pixbuf | ||
| 9 | # packages. | ||
| 10 | # | ||
| 11 | |||
| 12 | PIXBUF_PACKAGES ??= "${PN}" | ||
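A usage sketch (package layout hypothetical): a recipe that ships its gdk-pixbuf loaders in a separate package points PIXBUF_PACKAGES at that package so the scriptlets land there:

    inherit pixbufcache
    PIXBUF_PACKAGES = "${PN}-loaders"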
| 13 | |||
| 14 | PACKAGE_WRITE_DEPS += "qemuwrapper-cross gdk-pixbuf-native" | ||
| 15 | |||
| 16 | pixbufcache_common() { | ||
| 17 | if [ "x$D" != "x" ]; then | ||
| 18 | $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \ | ||
| 19 | bindir=${bindir} base_libdir=${base_libdir} | ||
| 20 | else | ||
| 21 | |||
| 22 | # Update the pixbuf loaders in case they haven't been registered yet | ||
| 23 | ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache | ||
| 24 | |||
| 25 | if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then | ||
| 26 | for icondir in ${datadir}/icons/*; do | ||
| 27 | if [ -d ${icondir} ]; then | ||
| 28 | gtk-update-icon-cache -t -q ${icondir} | ||
| 29 | fi | ||
| 30 | done | ||
| 31 | fi | ||
| 32 | fi | ||
| 33 | } | ||
| 34 | |||
| 35 | python populate_packages:append() { | ||
| 36 | pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split() | ||
| 37 | |||
| 38 | for pkg in pixbuf_pkgs: | ||
| 39 | bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg) | ||
| 40 | postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst') | ||
| 41 | if not postinst: | ||
| 42 | postinst = '#!/bin/sh\n' | ||
| 43 | postinst += d.getVar('pixbufcache_common') | ||
| 44 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 45 | |||
| 46 | postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm') | ||
| 47 | if not postrm: | ||
| 48 | postrm = '#!/bin/sh\n' | ||
| 49 | postrm += d.getVar('pixbufcache_common') | ||
| 50 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 51 | } | ||
| 52 | |||
| 53 | gdkpixbuf_complete() { | ||
| 54 | GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1 | ||
| 55 | } | ||
| 56 | |||
| 57 | DEPENDS:append:class-native = " gdk-pixbuf-native" | ||
| 58 | SYSROOT_PREPROCESS_FUNCS:append:class-native = " pixbufcache_sstate_postinst" | ||
| 59 | |||
| 60 | pixbufcache_sstate_postinst() { | ||
| 61 | mkdir -p ${SYSROOT_DESTDIR}${bindir} | ||
| 62 | dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN} | ||
| 63 | echo '#!/bin/sh' > $dest | ||
| 64 | echo "${gdkpixbuf_complete}" >> $dest | ||
| 65 | chmod 0755 $dest | ||
| 66 | } | ||
diff --git a/meta/classes-recipe/pkgconfig.bbclass b/meta/classes-recipe/pkgconfig.bbclass deleted file mode 100644 index 1e1f3824dd..0000000000 --- a/meta/classes-recipe/pkgconfig.bbclass +++ /dev/null | |||
| @@ -1,8 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | DEPENDS:prepend = "pkgconfig-native " | ||
| 8 | |||
diff --git a/meta/classes-recipe/populate_sdk.bbclass b/meta/classes-recipe/populate_sdk.bbclass deleted file mode 100644 index caeef5d2b2..0000000000 --- a/meta/classes-recipe/populate_sdk.bbclass +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # The majority of populate_sdk is located in populate_sdk_base | ||
| 8 | # This chunk simply facilitates compatibility with SDK-only recipes. | ||
| 9 | |||
| 10 | inherit populate_sdk_base | ||
| 11 | |||
| 12 | addtask populate_sdk after do_install before do_build | ||
| 13 | |||
diff --git a/meta/classes-recipe/populate_sdk_base.bbclass b/meta/classes-recipe/populate_sdk_base.bbclass deleted file mode 100644 index 8e671cf28f..0000000000 --- a/meta/classes-recipe/populate_sdk_base.bbclass +++ /dev/null | |||
| @@ -1,437 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | SDK_CLASSES += "${@bb.utils.contains("IMAGE_CLASSES", "testimage", "testsdk", "", d)}" | ||
| 8 | inherit_defer ${SDK_CLASSES} | ||
| 9 | |||
| 10 | PACKAGES = "" | ||
| 11 | |||
| 12 | # This exists as an optimization for SPDX processing to only run in image and | ||
| 13 | # SDK processing context. This class happens to be common to these usages. | ||
| 14 | SPDX_MULTILIB_SSTATE_ARCHS = "${@all_multilib_tune_values(d, 'SSTATE_ARCHS')}" | ||
| 15 | |||
| 16 | inherit image-postinst-intercepts image-artifact-names nopackages | ||
| 17 | |||
| 18 | # Wildcards specifying complementary packages to install for every package that has been explicitly | ||
| 19 | # installed into the rootfs | ||
| 20 | COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev' | ||
| 21 | COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev' | ||
| 22 | COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc' | ||
| 23 | COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg' | ||
| 24 | COMPLEMENTARY_GLOB[src-pkgs] = '*-src' | ||
| 25 | COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest ${MLPREFIX}ptest-runner' | ||
| 26 | COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion' | ||
| 27 | COMPLEMENTARY_GLOB[zsh-completion-pkgs] = '*-zsh-completion' | ||
| 28 | |||
| 29 | def complementary_globs(featurevar, d): | ||
| 30 | all_globs = d.getVarFlags('COMPLEMENTARY_GLOB') | ||
| 31 | globs = [] | ||
| 32 | features = set((d.getVar(featurevar) or '').split()) | ||
| 33 | for name, glob in all_globs.items(): | ||
| 34 | if name in features: | ||
| 35 | globs.append(glob) | ||
| 36 | return ' '.join(globs) | ||
| 37 | |||
| 38 | SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}" | ||
| 39 | SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}' | ||
| 40 | SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES" | ||
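For instance (using the globs defined above), ptest content can be pulled into the SDK from local.conf; the feature expands to the '*-ptest ${MLPREFIX}ptest-runner' glob:

    SDKIMAGE_FEATURES:append = " ptest-pkgs"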
| 41 | |||
| 42 | PACKAGE_ARCHS:append:task-populate-sdk = " sdk-provides-dummy-target" | ||
| 43 | SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}" | ||
| 44 | |||
| 45 | # List of locales to install, or "all" for all of them, or unset for none. | ||
| 46 | SDKIMAGE_LINGUAS ?= "all" | ||
| 47 | |||
| 48 | inherit_defer rootfs_${IMAGE_PKGTYPE} | ||
| 49 | |||
| 50 | SDK_DIR = "${WORKDIR}/sdk" | ||
| 51 | SDK_OUTPUT = "${SDK_DIR}/image" | ||
| 52 | SDK_DEPLOY = "${DEPLOY_DIR}/sdk" | ||
| 53 | |||
| 54 | SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk" | ||
| 55 | |||
| 56 | PSEUDO_INCLUDE_PATHS .= ",${SDK_DIR}" | ||
| 57 | |||
| 58 | B:task-populate-sdk = "${SDK_DIR}" | ||
| 59 | |||
| 60 | SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}" | ||
| 61 | |||
| 62 | SDK_TOOLCHAIN_LANGS ??= "" | ||
| 63 | SDK_TOOLCHAIN_LANGS:remove:sdkmingw32 = "rust" | ||
| 64 | # libstd-rs doesn't build for mips n32 with compiler constraint errors | ||
| 65 | SDK_TOOLCHAIN_LANGS:remove:mipsarchn32 = "rust" | ||
| 66 | # go will not build for x86-x32 or mingw | ||
| 67 | SDK_TOOLCHAIN_LANGS:remove:linux-gnux32 = "go" | ||
| 68 | SDK_TOOLCHAIN_LANGS:remove:riscv32 = "go" | ||
| 69 | SDK_TOOLCHAIN_LANGS:remove:sdkmingw32 = "go" | ||
| 70 | SDK_TOOLCHAIN_LANGS:remove:powerpc = "go" | ||
| 71 | |||
| 72 | TOOLCHAIN_HOST_TASK ?= " \ | ||
| 73 | nativesdk-packagegroup-sdk-host \ | ||
| 74 | packagegroup-cross-canadian-${MACHINE} \ | ||
| 75 | ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'go', 'packagegroup-go-cross-canadian-${MACHINE}', '', d)} \ | ||
| 76 | ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'rust', 'packagegroup-rust-cross-canadian-${MACHINE}', '', d)} \ | ||
| 77 | " | ||
| 78 | TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= "" | ||
| 79 | TOOLCHAIN_TARGET_TASK ?= " \ | ||
| 80 | ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \ | ||
| 81 | ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'go', multilib_pkg_extend(d, 'packagegroup-go-sdk-target'), '', d)} \ | ||
| 82 | ${@bb.utils.contains('SDK_TOOLCHAIN_LANGS', 'rust', multilib_pkg_extend(d, 'libstd-rs'), '', d)} \ | ||
| 83 | target-sdk-provides-dummy \ | ||
| 84 | " | ||
| 85 | TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= "" | ||
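A common customization sketch (the extra package names are illustrative): additional target or host content can be appended to the toolchain task lists, e.g. from an image recipe or local.conf:

    TOOLCHAIN_TARGET_TASK:append = " libexample-dev"
    TOOLCHAIN_HOST_TASK:append = " nativesdk-cmake"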
| 86 | TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}" | ||
| 87 | |||
| 88 | # Default archived SDK's suffix | ||
| 89 | SDK_ARCHIVE_TYPE ?= "tar.xz" | ||
| 90 | SDK_XZ_COMPRESSION_LEVEL ?= "-9" | ||
| 91 | SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}" | ||
| 92 | SDK_ZIP_OPTIONS ?= "-y" | ||
| 93 | SDK_7ZIP_OPTIONS ?= "-mx=9 -mm=BZip2" | ||
| 94 | SDK_7ZIP_TYPE ?= "7z" | ||
| 95 | SDK_ZSTD_COMPRESSION_LEVEL = "-17" | ||
| 96 | |||
| 97 | # Select the SDK archive command according to SDK_ARCHIVE_TYPE; zip, 7zip, tar.zst and tar.xz are supported | ||
| 98 | python () { | ||
| 99 | if d.getVar('SDK_ARCHIVE_TYPE') == 'zip': | ||
| 100 | d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native') | ||
| 101 | # SDK_ARCHIVE_CMD is used to generate the archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR} | ||
| 102 | # it is recommended to cd into the input dir first to avoid archiving the build path | ||
| 103 | d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .') | ||
| 104 | elif d.getVar('SDK_ARCHIVE_TYPE') == '7zip': | ||
| 105 | d.setVar('SDK_ARCHIVE_DEPENDS', '7zip-native') | ||
| 106 | d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; 7za a -r ${SDK_7ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_7ZIP_TYPE} .') | ||
| 107 | elif d.getVar('SDK_ARCHIVE_TYPE') == 'tar.zst': | ||
| 108 | d.setVar('SDK_ARCHIVE_DEPENDS', 'zstd-native') | ||
| 109 | d.setVar('SDK_ARCHIVE_CMD', | ||
| 110 | 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | zstd -f -k -T0 -c ${SDK_ZSTD_COMPRESSION_LEVEL} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}') | ||
| 111 | elif d.getVar('SDK_ARCHIVE_TYPE') == 'tar.xz': | ||
| 112 | d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native') | ||
| 113 | d.setVar('SDK_ARCHIVE_CMD', | ||
| 114 | 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}') | ||
| 115 | else: | ||
| 116 | bb.fatal("Invalid SDK_ARCHIVE_TYPE: %s, the supported SDK archive types are: zip, 7zip, tar.xz, tar.zst" % d.getVar('SDK_ARCHIVE_TYPE')) | ||
| 117 | } | ||
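For example, switching to one of the other supported archive formats from local.conf:

    SDK_ARCHIVE_TYPE = "tar.zst"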
| 118 | |||
| 119 | SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}" | ||
| 120 | SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native" | ||
| 121 | PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:" | ||
| 122 | SDK_DEPENDS += "nativesdk-glibc-locale" | ||
| 123 | |||
| 124 | # We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it | ||
| 125 | # could be set to the MACHINE_ARCH | ||
| 126 | REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}" | ||
| 127 | |||
| 128 | PID = "${@os.getpid()}" | ||
| 129 | |||
| 130 | EXCLUDE_FROM_WORLD = "1" | ||
| 131 | |||
| 132 | SDK_PACKAGING_FUNC ?= "create_shar" | ||
| 133 | SDK_PRE_INSTALL_COMMAND ?= "" | ||
| 134 | SDK_POST_INSTALL_COMMAND ?= "" | ||
| 135 | SDK_RELOCATE_AFTER_INSTALL ?= "1" | ||
| 136 | |||
| 137 | SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk" | ||
| 138 | SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK" | ||
| 139 | |||
| 140 | SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest" | ||
| 141 | SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest" | ||
| 142 | SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest" | ||
| 143 | SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest" | ||
| 144 | |||
| 145 | SDK_PRUNE_SYSROOT_DIRS ?= "/dev" | ||
| 146 | |||
| 147 | python write_target_sdk_manifest () { | ||
| 148 | from oe.sdk import sdk_list_installed_packages | ||
| 149 | from oe.utils import format_pkg_list | ||
| 150 | sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST")) | ||
| 151 | pkgs = sdk_list_installed_packages(d, True) | ||
| 152 | if not os.path.exists(sdkmanifestdir): | ||
| 153 | bb.utils.mkdirhier(sdkmanifestdir) | ||
| 154 | with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output: | ||
| 155 | output.write(format_pkg_list(pkgs, 'ver')) | ||
| 156 | } | ||
| 157 | |||
| 158 | sdk_prune_dirs () { | ||
| 159 | for d in ${SDK_PRUNE_SYSROOT_DIRS}; do | ||
| 160 | rm -rf ${SDK_OUTPUT}${SDKTARGETSYSROOT}$d | ||
| 161 | done | ||
| 162 | } | ||
| 163 | |||
| 164 | python write_sdk_test_data() { | ||
| 165 | from oe.data import export2json | ||
| 166 | testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME')) | ||
| 167 | bb.utils.mkdirhier(os.path.dirname(testdata)) | ||
| 168 | export2json(d, testdata) | ||
| 169 | } | ||
| 170 | |||
| 171 | python write_host_sdk_manifest () { | ||
| 172 | from oe.sdk import sdk_list_installed_packages | ||
| 173 | from oe.utils import format_pkg_list | ||
| 174 | sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST")) | ||
| 175 | pkgs = sdk_list_installed_packages(d, False) | ||
| 176 | if not os.path.exists(sdkmanifestdir): | ||
| 177 | bb.utils.mkdirhier(sdkmanifestdir) | ||
| 178 | with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output: | ||
| 179 | output.write(format_pkg_list(pkgs, 'ver')) | ||
| 180 | } | ||
| 181 | |||
| 182 | POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data" | ||
| 183 | POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest sdk_prune_dirs" | ||
| 184 | POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest" | ||
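| | # Note: the :task-populate-sdk override scopes these manifest writers to the | ||
| | # standard SDK; the extensible SDK appends its own manifest functions under | ||
| | # the task-populate-sdk-ext override instead. | ||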
| 185 | |||
| 186 | # Prepare the root links to point to the /usr counterparts. | ||
| 187 | create_merged_usr_symlinks() { | ||
| 188 | root="$1" | ||
| 189 | install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir} | ||
| 190 | ln -rs $root${base_bindir} $root/bin | ||
| 191 | ln -rs $root${base_sbindir} $root/sbin | ||
| 192 | ln -rs $root${base_libdir} $root/${baselib} | ||
| 193 | |||
| 194 | if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then | ||
| 195 | install -d $root${nonarch_base_libdir} | ||
| 196 | ln -rs $root${nonarch_base_libdir} $root/lib | ||
| 197 | fi | ||
| 198 | |||
| 199 | # create base links for multilibs | ||
| 200 | multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}" | ||
| 201 | for d in $multi_libdirs; do | ||
| 202 | install -d $root${exec_prefix}/$d | ||
| 203 | ln -rs $root${exec_prefix}/$d $root/$d | ||
| 204 | done | ||
| 205 | } | ||
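| | # Illustrative result for a usrmerge sysroot (assuming base_bindir=/usr/bin, | ||
| | # base_sbindir=/usr/sbin, base_libdir=/usr/lib and baselib=lib): | ||
| | # $root/bin -> usr/bin, $root/sbin -> usr/sbin, $root/lib -> usr/lib | ||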
| 206 | |||
| 207 | create_merged_usr_symlinks_sdk() { | ||
| 208 | create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT} | ||
| 209 | } | ||
| 210 | |||
| 211 | POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk', '',d)}" | ||
| 212 | |||
| 213 | SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC}' if '${SDK_PACKAGING_FUNC}' else ''}" | ||
| 214 | SDK_POSTPROCESS_COMMAND = "create_sdk_files check_sdk_sysroots archive_sdk ${SDK_PACKAGING_COMMAND}" | ||
| 215 | |||
| 216 | def populate_sdk_common(d): | ||
| 217 | from oe.sdk import populate_sdk | ||
| 218 | from oe.manifest import create_manifest, Manifest | ||
| 219 | import oe.packagedata | ||
| 220 | |||
| 221 | # Handle package exclusions | ||
| 222 | excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split() | ||
| 223 | inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split() | ||
| 224 | inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split() | ||
| 225 | |||
| 226 | d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs)) | ||
| 227 | d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs)) | ||
| 228 | |||
| 229 | for pkg in excl_pkgs: | ||
| 230 | if pkg in inst_pkgs: | ||
| 231 | bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs)) | ||
| 232 | inst_pkgs.remove(pkg) | ||
| 233 | |||
| 234 | if pkg in inst_attempt_pkgs: | ||
| 235 | bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs)) | ||
| 236 | inst_attempt_pkgs.remove(pkg) | ||
| 237 | |||
| 238 | d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs)) | ||
| 239 | d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs)) | ||
| 240 | |||
| 241 | pn = d.getVar('PN') | ||
| 242 | oe.packagedata.runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d) | ||
| 243 | oe.packagedata.runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d) | ||
| 244 | |||
| 245 | ld = bb.data.createCopy(d) | ||
| 246 | ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata") | ||
| 247 | oe.packagedata.runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld) | ||
| 248 | oe.packagedata.runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld) | ||
| 249 | d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK")) | ||
| 250 | d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY")) | ||
| 251 | |||
| 252 | # create target/host SDK manifests | ||
| 253 | create_manifest(d, manifest_dir=d.getVar('SDK_DIR'), | ||
| 254 | manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST) | ||
| 255 | create_manifest(d, manifest_dir=d.getVar('SDK_DIR'), | ||
| 256 | manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET) | ||
| 257 | |||
| 258 | populate_sdk(d) | ||
| 259 | |||
| 260 | fakeroot python do_populate_sdk() { | ||
| 261 | populate_sdk_common(d) | ||
| 262 | } | ||
| 263 | SSTATETASKS += "do_populate_sdk" | ||
| 264 | SSTATE_SKIP_CREATION:task-populate-sdk = '1' | ||
| 265 | do_populate_sdk[cleandirs] += "${SDKDEPLOYDIR}" | ||
| 266 | do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}" | ||
| 267 | do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}" | ||
| 268 | do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}" | ||
| 269 | python do_populate_sdk_setscene () { | ||
| 270 | sstate_setscene(d) | ||
| 271 | } | ||
| 272 | addtask do_populate_sdk_setscene | ||
| 273 | |||
| 274 | fakeroot create_sdk_files() { | ||
| 275 | cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/ | ||
| 276 | |||
| 277 | # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern. | ||
| 278 | # Escape special characters like '+' and '.' in the SDKPATH | ||
| 279 | escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g") | ||
| 280 | sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py | ||
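| | # Illustrative example: a hypothetical SDKPATH of /opt/poky+next/5.0 would be | ||
| | # escaped to /opt/poky\+next/5\.0 so '+' and '.' stay literal in the pattern | ||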
| 281 | |||
| 282 | mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ | ||
| 283 | echo '${SDKPATHNATIVE}${libdir_nativesdk} | ||
| 284 | ${SDKPATHNATIVE}${base_libdir_nativesdk} | ||
| 285 | include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf | ||
| 286 | } | ||
| 287 | |||
| 288 | python check_sdk_sysroots() { | ||
| 289 | # Fail the build if there are broken or dangling symlinks in the SDK sysroots | ||
| 290 | |||
| 291 | if d.getVar('CHECK_SDK_SYSROOTS') != '1': | ||
| 292 | # disabled, bail out | ||
| 293 | return | ||
| 294 | |||
| 295 | def norm_path(path): | ||
| 296 | return os.path.abspath(path) | ||
| 297 | |||
| 298 | # Get scan root | ||
| 299 | SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'), | ||
| 300 | d.getVar('SDKPATH'))) | ||
| 301 | |||
| 302 | bb.note('Checking SDK sysroots at ' + SCAN_ROOT) | ||
| 303 | |||
| 304 | def check_symlink(linkPath): | ||
| 305 | if not os.path.islink(linkPath): | ||
| 306 | return | ||
| 307 | |||
| 308 | linkDirPath = os.path.dirname(linkPath) | ||
| 309 | |||
| 310 | targetPath = os.readlink(linkPath) | ||
| 311 | if not os.path.isabs(targetPath): | ||
| 312 | targetPath = os.path.join(linkDirPath, targetPath) | ||
| 313 | targetPath = norm_path(targetPath) | ||
| 314 | |||
| 315 | if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ): | ||
| 316 | bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath)) | ||
| 317 | return | ||
| 318 | |||
| 319 | if not os.path.exists(targetPath): | ||
| 320 | bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath)) | ||
| 321 | return | ||
| 322 | |||
| 323 | if os.path.isdir(targetPath): | ||
| 324 | dir_walk(targetPath) | ||
| 325 | |||
| 326 | def walk_error_handler(e): | ||
| 327 | bb.error(str(e)) | ||
| 328 | |||
| 329 | def dir_walk(rootDir): | ||
| 330 | for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler): | ||
| 331 | entries = subDirEntries + fileEntries | ||
| 332 | for e in entries: | ||
| 333 | ePath = os.path.join(dirPath, e) | ||
| 334 | check_symlink(ePath) | ||
| 335 | |||
| 336 | # start | ||
| 337 | dir_walk(SCAN_ROOT) | ||
| 338 | } | ||
| 339 | |||
| 340 | SDKTAROPTS = "--owner=root --group=root --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}" | ||
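| | # Fixed owner/group plus a clamped mtime keep the tar output reproducible: | ||
| | # every entry is root-owned and no timestamp exceeds SOURCE_DATE_EPOCH | ||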
| 341 | |||
| 342 | fakeroot archive_sdk() { | ||
| 343 | # Package it up | ||
| 344 | mkdir -p ${SDKDEPLOYDIR} | ||
| 345 | ${SDK_ARCHIVE_CMD} | ||
| 346 | } | ||
| 347 | |||
| 348 | TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh" | ||
| 349 | TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh" | ||
| 350 | |||
| 351 | fakeroot create_shar() { | ||
| 352 | # copy in the template shar extractor script | ||
| 353 | cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh | ||
| 354 | |||
| 355 | rm -f ${T}/pre_install_command ${T}/post_install_command | ||
| 356 | |||
| 357 | if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then | ||
| 358 | cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command | ||
| 359 | fi | ||
| 360 | cat << "EOF" >> ${T}/pre_install_command | ||
| 361 | ${SDK_PRE_INSTALL_COMMAND} | ||
| 362 | EOF | ||
| 363 | |||
| 364 | cat << "EOF" >> ${T}/post_install_command | ||
| 365 | ${SDK_POST_INSTALL_COMMAND} | ||
| 366 | EOF | ||
| 367 | sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \ | ||
| 368 | -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \ | ||
| 369 | ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh | ||
| 370 | |||
| 371 | # substitute variables | ||
| 372 | sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \ | ||
| 373 | -e 's#@SDKPATH@#${SDKPATH}#g' \ | ||
| 374 | -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \ | ||
| 375 | -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \ | ||
| 376 | -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \ | ||
| 377 | -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \ | ||
| 378 | -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \ | ||
| 379 | -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \ | ||
| 380 | -e '/@SDK_PRE_INSTALL_COMMAND@/d' \ | ||
| 381 | -e '/@SDK_POST_INSTALL_COMMAND@/d' \ | ||
| 382 | -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \ | ||
| 383 | ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh | ||
| 384 | |||
| 385 | # add execution permission | ||
| 386 | chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh | ||
| 387 | |||
| 388 | # append the SDK tarball | ||
| 389 | cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh | ||
| 390 | |||
| 391 | # delete the old tarball, we don't need it anymore | ||
| 392 | rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} | ||
| 393 | } | ||
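| | # The output is a self-extracting installer: the shar script header with the | ||
| | # SDK archive appended, roughly [extractor script][tar/zip payload]. For | ||
| | # example (hypothetical file name), running | ||
| | # ./poky-glibc-x86_64-meta-toolchain-5.0.sh -y -d /opt/sdk | ||
| | # would extract the payload and trigger the post-install relocation. | ||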
| 394 | |||
| 395 | populate_sdk_log_check() { | ||
| 396 | for target in $* | ||
| 397 | do | ||
| 398 | lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}" | ||
| 399 | |||
| 400 | echo "log_check: Using $lf_path as logfile" | ||
| 401 | |||
| 402 | if [ -e "$lf_path" ]; then | ||
| 403 | ${IMAGE_PKGTYPE}_log_check $target $lf_path | ||
| 404 | echo "Logfile is clean" | ||
| 405 | else | ||
| 406 | echo "Cannot find logfile [$lf_path]" | ||
| 407 | fi | ||
| 408 | done | ||
| 409 | } | ||
| 410 | |||
| 411 | def sdk_command_variables(d): | ||
| 412 | return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS'] | ||
| 413 | |||
| 414 | def sdk_variables(d): | ||
| 415 | variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT', | ||
| 416 | 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS', | ||
| 417 | 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS'] | ||
| 418 | variables.extend(sdk_command_variables(d)) | ||
| 419 | return " ".join(variables) | ||
| 420 | |||
| 421 | do_populate_sdk[vardeps] += "${@sdk_variables(d)}" | ||
| 422 | |||
| 423 | python () { | ||
| 424 | variables = sdk_command_variables(d) | ||
| 425 | for var in variables: | ||
| 426 | d.setVarFlag(var, 'vardeps', d.getVar(var)) | ||
| 427 | } | ||
| 428 | |||
| 429 | do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \ | ||
| 430 | ${TOOLCHAIN_SHAR_EXT_TMPL}:True" | ||
| 431 | |||
| 432 | do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}" | ||
| 433 | do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}" | ||
| 434 | do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}" | ||
| 435 | do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb do_package_qa" | ||
| 436 | do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}" | ||
| 437 | addtask populate_sdk | ||
diff --git a/meta/classes-recipe/populate_sdk_ext.bbclass b/meta/classes-recipe/populate_sdk_ext.bbclass deleted file mode 100644 index 2859320ddf..0000000000 --- a/meta/classes-recipe/populate_sdk_ext.bbclass +++ /dev/null | |||
| @@ -1,854 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Extensible SDK | ||
| 8 | |||
| 9 | inherit populate_sdk_base | ||
| 10 | |||
| 11 | # Used to override TOOLCHAIN_HOST_TASK in the eSDK case | ||
| 12 | TOOLCHAIN_HOST_TASK_ESDK = " \ | ||
| 13 | meta-environment-extsdk-${MACHINE} \ | ||
| 14 | " | ||
| 15 | |||
| 16 | SDK_RELOCATE_AFTER_INSTALL:task-populate-sdk-ext = "0" | ||
| 17 | |||
| 18 | SDK_EXT = "" | ||
| 19 | SDK_EXT:task-populate-sdk-ext = "-ext" | ||
| 20 | |||
| 21 | # Options are full or minimal | ||
| 22 | SDK_EXT_TYPE ?= "full" | ||
| 23 | SDK_INCLUDE_PKGDATA ?= "0" | ||
| 24 | SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}" | ||
| 25 | SDK_INCLUDE_NATIVESDK ?= "0" | ||
| 26 | SDK_INCLUDE_BUILDTOOLS ?= '1' | ||
| 27 | |||
| 28 | SDK_RECRDEP_TASKS ?= "" | ||
| 29 | SDK_CUSTOM_TEMPLATECONF ?= "0" | ||
| 30 | |||
| 31 | ESDK_LOCALCONF_ALLOW ?= "" | ||
| 32 | ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \ | ||
| 33 | BB_NUMBER_THREADS \ | ||
| 34 | BB_NUMBER_PARSE_THREADS \ | ||
| 35 | PARALLEL_MAKE \ | ||
| 36 | PRSERV_HOST \ | ||
| 37 | SSTATE_MIRRORS \ | ||
| 38 | DL_DIR \ | ||
| 39 | SSTATE_DIR \ | ||
| 40 | TMPDIR \ | ||
| 41 | BB_SERVER_TIMEOUT \ | ||
| 42 | " | ||
| 43 | ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory" | ||
| 44 | SDK_UPDATE_URL ?= "" | ||
| 45 | |||
| 46 | SDK_TARGETS ?= "${PN}" | ||
| 47 | |||
| 48 | def get_sdk_install_targets(d, images_only=False): | ||
| 49 | sdk_install_targets = '' | ||
| 50 | if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal': | ||
| 51 | sdk_install_targets = d.getVar('SDK_TARGETS') | ||
| 52 | |||
| 53 | depd = d.getVar('BB_TASKDEPDATA', False) | ||
| 54 | tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d) | ||
| 55 | tasklist.remove('do_build') | ||
| 56 | for v in depd.values(): | ||
| 57 | if v[1] in tasklist: | ||
| 58 | if v[0] not in sdk_install_targets: | ||
| 59 | sdk_install_targets += ' {}'.format(v[0]) | ||
| 60 | |||
| 61 | if not images_only: | ||
| 62 | if d.getVar('SDK_INCLUDE_PKGDATA') == '1': | ||
| 63 | sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata' | ||
| 64 | if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1': | ||
| 65 | sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot' | ||
| 66 | |||
| 67 | return sdk_install_targets | ||
| 68 | |||
| 69 | get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA" | ||
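| | # Illustrative return value for a full eSDK built from an image recipe: | ||
| | # "core-image-minimal meta-extsdk-toolchain:do_populate_sysroot" | ||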
| 70 | |||
| 71 | OE_INIT_ENV_SCRIPT ?= "oe-init-build-env" | ||
| 72 | |||
| 73 | # The files from COREBASE that you want preserved in the copy of COREBASE placed | ||
| 74 | # into the SDK. This allows someone's own setup scripts in COREBASE, as well as | ||
| 75 | # untracked files, to be preserved. | ||
| 76 | COREBASE_FILES ?= " \ | ||
| 77 | oe-init-build-env \ | ||
| 78 | scripts \ | ||
| 79 | LICENSE \ | ||
| 80 | .templateconf \ | ||
| 81 | " | ||
| 82 | |||
| 83 | SDK_DIR:task-populate-sdk-ext = "${WORKDIR}/sdk-ext" | ||
| 84 | B:task-populate-sdk-ext = "${SDK_DIR}" | ||
| 85 | TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}" | ||
| 86 | TOOLCHAIN_OUTPUTNAME:task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}" | ||
| 87 | |||
| 88 | SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest" | ||
| 89 | SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest" | ||
| 90 | |||
| 91 | python write_target_sdk_ext_manifest () { | ||
| 92 | from oe.sdk import get_extra_sdkinfo | ||
| 93 | sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache') | ||
| 94 | extra_info = get_extra_sdkinfo(sstate_dir) | ||
| 95 | |||
| 96 | target = d.getVar('TARGET_SYS') | ||
| 97 | target_multimach = d.getVar('MULTIMACH_TARGET_SYS') | ||
| 98 | real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS') | ||
| 99 | |||
| 100 | pkgs = {} | ||
| 101 | os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True) | ||
| 102 | with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f: | ||
| 103 | for fn in extra_info['filesizes']: | ||
| 104 | info = fn.split(':') | ||
| 105 | if info[2] in (target, target_multimach, real_target_multimach) \ | ||
| 106 | or info[5] == 'allarch': | ||
| 107 | if not info[1] in pkgs: | ||
| 108 | f.write("%s %s %s\n" % (info[1], info[2], info[3])) | ||
| 109 | pkgs[info[1]] = {} | ||
| 110 | } | ||
| 111 | python write_host_sdk_ext_manifest () { | ||
| 112 | from oe.sdk import get_extra_sdkinfo | ||
| 113 | sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache') | ||
| 114 | extra_info = get_extra_sdkinfo(sstate_dir) | ||
| 115 | host = d.getVar('BUILD_SYS') | ||
| 116 | with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f: | ||
| 117 | for fn in extra_info['filesizes']: | ||
| 118 | info = fn.split(':') | ||
| 119 | if info[2] == host: | ||
| 120 | f.write("%s %s %s\n" % (info[1], info[2], info[3])) | ||
| 121 | } | ||
| 122 | |||
| 123 | SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = " write_target_sdk_ext_manifest write_host_sdk_ext_manifest" | ||
| 124 | |||
| 125 | SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK" | ||
| 126 | |||
| 127 | def clean_esdk_builddir(d, sdkbasepath): | ||
| 128 | """Clean up traces of the fake build for create_filtered_tasklist()""" | ||
| 129 | import shutil | ||
| 130 | cleanpaths = ['cache', 'tmp'] | ||
| 131 | for pth in cleanpaths: | ||
| 132 | fullpth = os.path.join(sdkbasepath, pth) | ||
| 133 | if os.path.isdir(fullpth): | ||
| 134 | shutil.rmtree(fullpth) | ||
| 135 | elif os.path.isfile(fullpth): | ||
| 136 | os.remove(fullpth) | ||
| 137 | |||
| 138 | def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath): | ||
| 139 | """ | ||
| 140 | Create a filtered list of tasks. Also double-checks that the build system | ||
| 141 | within the SDK basically works and required sstate artifacts are available. | ||
| 142 | """ | ||
| 143 | import tempfile | ||
| 144 | import shutil | ||
| 145 | import oe.copy_buildsystem | ||
| 146 | |||
| 147 | # Create a temporary build directory that we can pass to the env setup script | ||
| 148 | shutil.copyfile(sdkbasepath + '/conf/local.conf', sdkbasepath + '/conf/local.conf.bak') | ||
| 149 | try: | ||
| 150 | with open(sdkbasepath + '/conf/local.conf', 'a') as f: | ||
| 151 | # Force the use of sstate from the build system | ||
| 152 | f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR')) | ||
| 153 | # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it | ||
| 154 | f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n') | ||
| 155 | # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will | ||
| 156 | # be different and we won't be able to find our native sstate) | ||
| 157 | if not bb.data.inherits_class('uninative', d): | ||
| 158 | f.write('INHERIT:remove = "uninative"\n') | ||
| 159 | |||
| 160 | # Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake | ||
| 161 | # will not allow in its COREBASE path, so we need to rename the directory temporarily | ||
| 162 | temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk' | ||
| 163 | # Delete any existing temp dir | ||
| 164 | try: | ||
| 165 | shutil.rmtree(temp_sdkbasepath) | ||
| 166 | except FileNotFoundError: | ||
| 167 | pass | ||
| 168 | bb.utils.rename(sdkbasepath, temp_sdkbasepath) | ||
| 169 | cmdprefix = '. %s .; ' % conf_initpath | ||
| 170 | logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt' | ||
| 171 | try: | ||
| 172 | oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile) | ||
| 173 | except bb.process.ExecutionError as e: | ||
| 174 | msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip() | ||
| 175 | if 'attempted to execute unexpectedly and should have been setscened' in e.stdout: | ||
| 176 | msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n' | ||
| 177 | bb.fatal(msg) | ||
| 178 | bb.utils.rename(temp_sdkbasepath, sdkbasepath) | ||
| 179 | # Clean out residue of running bitbake, which check_sstate_task_list() | ||
| 180 | # will effectively do | ||
| 181 | clean_esdk_builddir(d, sdkbasepath) | ||
| 182 | finally: | ||
| 183 | localconf = sdkbasepath + '/conf/local.conf' | ||
| 184 | if os.path.exists(localconf + '.bak'): | ||
| 185 | os.replace(localconf + '.bak', localconf) | ||
| 186 | |||
| 187 | def copy_bitbake_and_layers(d, baseoutpath, derivative): | ||
| 188 | oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT') | ||
| 189 | |||
| 190 | conf_bbpath = '' | ||
| 191 | conf_initpath = '' | ||
| 192 | core_meta_subdir = '' | ||
| 193 | |||
| 194 | # Copy in all metadata layers + bitbake (as repositories) | ||
| 195 | buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d) | ||
| 196 | |||
| 197 | if derivative: | ||
| 198 | workspace_name = 'orig-workspace' | ||
| 199 | else: | ||
| 200 | workspace_name = None | ||
| 201 | |||
| 202 | corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name) | ||
| 203 | conf_bbpath = os.path.join('layers', corebase, 'bitbake') | ||
| 204 | |||
| 205 | for path in os.listdir(baseoutpath + '/layers'): | ||
| 206 | relpath = os.path.join('layers', path, oe_init_env_script) | ||
| 207 | if os.path.exists(os.path.join(baseoutpath, relpath)): | ||
| 208 | conf_initpath = relpath | ||
| 209 | |||
| 210 | relpath = os.path.join('layers', path, 'scripts', 'esdk-tools', 'devtool') | ||
| 211 | if os.path.exists(os.path.join(baseoutpath, relpath)): | ||
| 212 | esdk_tools_path = os.path.dirname(relpath) | ||
| 213 | |||
| 214 | relpath = os.path.join('layers', path, 'meta') | ||
| 215 | if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')): | ||
| 216 | core_meta_subdir = relpath | ||
| 217 | |||
| 218 | d.setVar('oe_init_build_env_path', conf_initpath) | ||
| 219 | d.setVar('esdk_tools_path', esdk_tools_path) | ||
| 220 | |||
| 221 | return (conf_initpath, conf_bbpath, core_meta_subdir, sdkbblayers) | ||
| 222 | |||
| 223 | def write_devtool_config(d, baseoutpath, conf_bbpath, conf_initpath, core_meta_subdir): | ||
| 224 | # Write out config file for devtool | ||
| 225 | import configparser | ||
| 226 | config = configparser.ConfigParser() | ||
| 227 | config.add_section('General') | ||
| 228 | config.set('General', 'bitbake_subdir', conf_bbpath) | ||
| 229 | config.set('General', 'init_path', conf_initpath) | ||
| 230 | config.set('General', 'core_meta_subdir', core_meta_subdir) | ||
| 231 | config.add_section('SDK') | ||
| 232 | config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS')) | ||
| 233 | updateurl = d.getVar('SDK_UPDATE_URL') | ||
| 234 | if updateurl: | ||
| 235 | config.set('SDK', 'updateserver', updateurl) | ||
| 236 | bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf')) | ||
| 237 | with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f: | ||
| 238 | config.write(f) | ||
| 239 | |||
| 240 | def write_unlocked_sigs(d, baseoutpath): | ||
| 241 | unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc') | ||
| 242 | with open(unlockedsigs, 'w') as f: | ||
| 243 | pass | ||
| 244 | |||
| 245 | def write_bblayers_conf(d, baseoutpath, sdkbblayers): | ||
| 246 | # Create a layer for new recipes / appends | ||
| 247 | bbpath = d.getVar('BBPATH') | ||
| 248 | env = os.environ.copy() | ||
| 249 | env['PYTHONDONTWRITEBYTECODE'] = '1' | ||
| 250 | bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--layerseries', d.getVar("LAYERSERIES_CORENAMES"), '--create-only', os.path.join(baseoutpath, 'workspace')], env=env) | ||
| 251 | |||
| 252 | # Create bblayers.conf | ||
| 253 | bb.utils.mkdirhier(baseoutpath + '/conf') | ||
| 254 | with open(baseoutpath + '/conf/bblayers.conf', 'w') as f: | ||
| 255 | f.write('# WARNING: this configuration has been automatically generated and in\n') | ||
| 256 | f.write('# most cases should not be edited. If you need more flexibility than\n') | ||
| 257 | f.write('# this configuration provides, it is strongly suggested that you set\n') | ||
| 258 | f.write('# up a proper instance of the full build system and use that instead.\n\n') | ||
| 259 | |||
| 260 | # LCONF_VERSION may not be set, for example when using meta-poky, | ||
| 261 | # so don't error if it isn't found | ||
| 262 | lconf_version = d.getVar('LCONF_VERSION', False) | ||
| 263 | if lconf_version is not None: | ||
| 264 | f.write('LCONF_VERSION = "%s"\n\n' % lconf_version) | ||
| 265 | |||
| 266 | f.write('BBPATH = "$' + '{TOPDIR}"\n') | ||
| 267 | f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n') | ||
| 268 | f.write('BBLAYERS := " \\\n') | ||
| 269 | for layerrelpath in sdkbblayers: | ||
| 270 | f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath) | ||
| 271 | f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n') | ||
| 272 | f.write(' "\n') | ||
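| | # A sketch of the generated conf/bblayers.conf (layer paths illustrative): | ||
| | # BBLAYERS := " \ | ||
| | # ${SDKBASEMETAPATH}/layers/poky/meta \ | ||
| | # ${SDKBASEMETAPATH}/workspace \ | ||
| | # " | ||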
| 273 | |||
| 274 | def copy_uninative(d, baseoutpath): | ||
| 275 | import shutil | ||
| 276 | |||
| 277 | uninative_checksum = None | ||
| 278 | |||
| 279 | # Copy uninative tarball | ||
| 280 | # For now this is where uninative.bbclass expects the tarball | ||
| 281 | if bb.data.inherits_class('uninative', d): | ||
| 282 | uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}') | ||
| 283 | uninative_checksum = bb.utils.sha256_file(uninative_file) | ||
| 284 | uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum) | ||
| 285 | bb.utils.mkdirhier(uninative_outdir) | ||
| 286 | shutil.copy(uninative_file, uninative_outdir) | ||
| 287 | |||
| 288 | return uninative_checksum | ||
| 289 | |||
| 290 | def write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_checksum): | ||
| 291 | import shutil | ||
| 292 | |||
| 293 | # Check if a custom templateconf path is set | ||
| 294 | use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF') | ||
| 295 | |||
| 296 | env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split() | ||
| 297 | env_passthrough_values = {} | ||
| 298 | |||
| 299 | # Create local.conf | ||
| 300 | builddir = d.getVar('TOPDIR') | ||
| 301 | if derivative and os.path.exists(builddir + '/conf/site.conf'): | ||
| 302 | shutil.copyfile(builddir + '/conf/site.conf', baseoutpath + '/conf/site.conf') | ||
| 303 | if derivative and os.path.exists(builddir + '/conf/auto.conf'): | ||
| 304 | shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf') | ||
| 305 | if derivative: | ||
| 306 | shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf') | ||
| 307 | else: | ||
| 308 | local_conf_allowed = (d.getVar('ESDK_LOCALCONF_ALLOW') or '').split() | ||
| 309 | local_conf_remove = (d.getVar('ESDK_LOCALCONF_REMOVE') or '').split() | ||
| 310 | def handle_var(varname, origvalue, op, newlines): | ||
| 311 | if varname in local_conf_remove or (origvalue.strip().startswith('/') and not varname in local_conf_allowed): | ||
| 312 | newlines.append('# Removed original setting of %s\n' % varname) | ||
| 313 | return None, op, 0, True | ||
| 314 | else: | ||
| 315 | if varname in env_passthrough: | ||
| 316 | env_passthrough_values[varname] = origvalue | ||
| 317 | return origvalue, op, 0, True | ||
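| | # (bb.utils.edit_metadata drops the assignment entirely when the handler | ||
| | # returns None as the value; returning origvalue keeps the line unchanged.) | ||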
| 318 | varlist = ['[^#=+ ]*'] | ||
| 319 | oldlines = [] | ||
| 320 | for conffile in ['site.conf', 'auto.conf', 'toolcfg.conf', 'local.conf']: | ||
| 321 | if os.path.exists(builddir + '/conf/' + conffile): | ||
| 322 | with open(builddir + '/conf/' + conffile, 'r') as f: | ||
| 323 | oldlines += f.readlines() | ||
| 324 | (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var) | ||
| 325 | |||
| 326 | with open(baseoutpath + '/conf/local.conf', 'w') as f: | ||
| 327 | f.write('# WARNING: this configuration has been automatically generated and in\n') | ||
| 328 | f.write('# most cases should not be edited. If you need more flexibility than\n') | ||
| 329 | f.write('# this configuration provides, it is strongly suggested that you set\n') | ||
| 330 | f.write('# up a proper instance of the full build system and use that instead.\n\n') | ||
| 331 | for line in newlines: | ||
| 332 | if line.strip() and not line.startswith('#'): | ||
| 333 | f.write(line) | ||
| 334 | # Write a newline just in case there's none at the end of the original | ||
| 335 | f.write('\n') | ||
| 336 | |||
| 337 | f.write('TMPDIR = "${TOPDIR}/tmp"\n') | ||
| 338 | f.write('DL_DIR = "${TOPDIR}/downloads"\n') | ||
| 339 | |||
| 340 | if bb.data.inherits_class('uninative', d): | ||
| 341 | f.write('INHERIT += "%s"\n' % 'uninative') | ||
| 342 | f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum)) | ||
| 343 | |||
| 344 | # CONF_VERSION may not be set, for example when using an empty local.conf | ||
| 345 | # generated with bitbake-setup, and it is not otherwise required to exist. | ||
| 346 | # Write it out only if it's defined. | ||
| 347 | conf_version = d.getVar('CONF_VERSION', False) | ||
| 348 | if conf_version is not None: | ||
| 349 | f.write('CONF_VERSION = "%s"\n\n' % conf_version) | ||
| 350 | |||
| 351 | # Some classes are not suitable for SDK, remove them from INHERIT | ||
| 352 | f.write('INHERIT:remove = "%s"\n' % d.getVar('ESDK_CLASS_INHERIT_DISABLE', False)) | ||
| 353 | |||
| 354 | # Bypass the default connectivity check if any | ||
| 355 | f.write('CONNECTIVITY_CHECK_URIS = ""\n\n') | ||
| 356 | |||
| 357 | # This warning will come out if reverse dependencies for a task | ||
| 358 | # don't have sstate as well as the task itself. We already know | ||
| 359 | # this will be the case for the extensible sdk, so turn off the | ||
| 360 | # warning. | ||
| 361 | f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n') | ||
| 362 | |||
| 363 | # Warn if the sigs in the locked-signature file don't match | ||
| 364 | # the sig computed from the metadata. | ||
| 365 | f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n') | ||
| 366 | |||
| 367 | # We want to be able to set this without a full reparse | ||
| 368 | f.write('BB_HASHCONFIG_IGNORE_VARS:append = " SIGGEN_UNLOCKED_RECIPES"\n\n') | ||
| 369 | |||
| 370 | # Set up which tasks are exempt from setscene enforcement when running on install | ||
| 371 | f.write('BB_SETSCENE_ENFORCE_IGNORE_TASKS = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n') | ||
| 372 | |||
| 373 | # Hide the config information from bitbake output (since it's fixed within the SDK) | ||
| 374 | f.write('BUILDCFG_HEADER = ""\n\n') | ||
| 375 | |||
| 376 | # Write METADATA_REVISION | ||
| 377 | # Needs distro override so it can override the value set in the bbclass code (later than local.conf) | ||
| 378 | f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION'))) | ||
| 379 | |||
| 380 | f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n') | ||
| 381 | f.write('WITHIN_EXT_SDK = "1"\n\n') | ||
| 382 | |||
| 383 | if d.getVar("PRSERV_HOST"): | ||
| 384 | # Override this; we now include PR data, so it should only point to the local database | ||
| 385 | f.write('PRSERV_HOST = "localhost:0"\n\n') | ||
| 386 | |||
| 387 | # Allow additional config through sdk-extra.conf | ||
| 388 | fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d) | ||
| 389 | if fn: | ||
| 390 | with open(fn, 'r') as xf: | ||
| 391 | for line in xf: | ||
| 392 | f.write(line) | ||
| 393 | |||
| 394 | # If you define a sdk_extraconf() function then it can contain additional config | ||
| 395 | # (Though this is awkward; sdk-extra.conf should probably be used instead) | ||
| 396 | extraconf = (d.getVar('sdk_extraconf') or '').strip() | ||
| 397 | if extraconf: | ||
| 398 | # Strip off any leading / trailing spaces | ||
| 399 | for line in extraconf.splitlines(): | ||
| 400 | f.write(line.strip() + '\n') | ||
| 401 | |||
| 402 | f.write('require conf/locked-sigs.inc\n') | ||
| 403 | f.write('require conf/unlocked-sigs.inc\n') | ||
| 404 | |||
| 405 | # Copy multiple configurations if they exist in the user's config directory | ||
| 406 | if d.getVar('BBMULTICONFIG') is not None: | ||
| 407 | bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf', 'multiconfig')) | ||
| 408 | for mc in d.getVar('BBMULTICONFIG').split(): | ||
| 409 | dest_stub = "/conf/multiconfig/%s.conf" % (mc,) | ||
| 410 | if os.path.exists(builddir + dest_stub): | ||
| 411 | shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub) | ||
| 412 | |||
| 413 | # If PR Service is in use, we need to export this as well | ||
| 414 | bb.note('Do we have a pr database?') | ||
| 415 | if d.getVar("PRSERV_HOST"): | ||
| 416 | bb.note('Writing PR database...') | ||
| 417 | # Based on the code in classes/prexport.bbclass | ||
| 418 | import oe.prservice | ||
| 419 | # Dump meta info of the tables | ||
| 420 | localdata = d.createCopy() | ||
| 421 | localdata.setVar('PRSERV_DUMPOPT_COL', "1") | ||
| 422 | localdata.setVar('PRSERV_DUMPDIR', os.path.join(baseoutpath, 'conf')) | ||
| 423 | localdata.setVar('PRSERV_DUMPFILE', '${PRSERV_DUMPDIR}/prserv.inc') | ||
| 424 | |||
| 425 | bb.note('PR Database write to %s' % (localdata.getVar('PRSERV_DUMPFILE'))) | ||
| 426 | |||
| 427 | retval = oe.prservice.prserv_dump_db(localdata) | ||
| 428 | if not retval: | ||
| 429 | bb.error("prexport_handler: export failed!") | ||
| 430 | return | ||
| 431 | (metainfo, datainfo) = retval | ||
| 432 | oe.prservice.prserv_export_tofile(localdata, metainfo, datainfo, True) | ||
| 433 | |||
| 434 | # Use templateconf.cfg file from builddir if exists | ||
| 435 | if os.path.exists(builddir + '/conf/templateconf.cfg') and use_custom_templateconf == '1': | ||
| 436 | shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg') | ||
| 437 | else: | ||
| 438 | # Write a templateconf.cfg | ||
| 439 | with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f: | ||
| 440 | f.write('meta/conf/templates/default\n') | ||
| 441 | os.makedirs(os.path.join(baseoutpath, core_meta_subdir, 'conf/templates/default'), exist_ok=True) | ||
| 442 | |||
| 443 | # Ensure any variables set from the external environment (by way of | ||
| 444 | # BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration | ||
| 445 | extralines = [] | ||
| 446 | for name, value in env_passthrough_values.items(): | ||
| 447 | actualvalue = d.getVar(name) or '' | ||
| 448 | if value != actualvalue: | ||
| 449 | extralines.append('%s = "%s"\n' % (name, actualvalue)) | ||
| 450 | if extralines: | ||
| 451 | with open(baseoutpath + '/conf/local.conf', 'a') as f: | ||
| 452 | f.write('\n') | ||
| 453 | f.write('# Extra settings from environment:\n') | ||
| 454 | for line in extralines: | ||
| 455 | f.write(line) | ||
| 456 | f.write('\n') | ||
| 457 | |||
| 458 | def prepare_locked_cache(d, baseoutpath, derivative, conf_initpath): | ||
| 459 | import shutil | ||
| 460 | |||
| 461 | # Filter the locked signatures file to just the sstate tasks we are interested in | ||
| 462 | excluded_targets = get_sdk_install_targets(d, images_only=True) | ||
| 463 | sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc' | ||
| 464 | lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc' | ||
| 465 | # nativesdk-only sigfile to merge into locked-sigs.inc | ||
| 466 | sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1') | ||
| 467 | nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc' | ||
| 468 | nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc' | ||
| 469 | |||
| 470 | if sdk_include_nativesdk: | ||
| 471 | oe.copy_buildsystem.prune_lockedsigs([], | ||
| 472 | excluded_targets.split(), | ||
| 473 | nativesigfile, | ||
| 474 | True, | ||
| 475 | nativesigfile_pruned) | ||
| 476 | |||
| 477 | oe.copy_buildsystem.merge_lockedsigs([], | ||
| 478 | sigfile, | ||
| 479 | nativesigfile_pruned, | ||
| 480 | sigfile) | ||
| 481 | |||
| 482 | oe.copy_buildsystem.prune_lockedsigs([], | ||
| 483 | excluded_targets.split(), | ||
| 484 | sigfile, | ||
| 485 | False, | ||
| 486 | lockedsigs_pruned) | ||
| 487 | |||
| 488 | sstate_out = baseoutpath + '/sstate-cache' | ||
| 489 | bb.utils.remove(sstate_out, True) | ||
| 490 | |||
| 491 | # uninative.bbclass sets NATIVELSBSTRING to 'universal' | ||
| 492 | fixedlsbstring = "universal" if bb.data.inherits_class('uninative', d) else "" | ||
| 493 | |||
| 494 | sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1') | ||
| 495 | sdk_ext_type = d.getVar('SDK_EXT_TYPE') | ||
| 496 | if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk: | ||
| 497 | # Create the filtered task list used to generate the sstate cache shipped with the SDK | ||
| 498 | tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt' | ||
| 499 | create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath) | ||
| 500 | else: | ||
| 501 | tasklistfn = None | ||
| 502 | |||
| 503 | # Add packagedata if enabled | ||
| 504 | if d.getVar('SDK_INCLUDE_PKGDATA') == '1': | ||
| 505 | lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc' | ||
| 506 | lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc' | ||
| 507 | shutil.move(lockedsigs_pruned, lockedsigs_base) | ||
| 508 | oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'], | ||
| 509 | lockedsigs_base, | ||
| 510 | d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc', | ||
| 511 | lockedsigs_pruned, | ||
| 512 | lockedsigs_copy) | ||
| 513 | |||
| 514 | if sdk_include_toolchain: | ||
| 515 | lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc' | ||
| 516 | lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc") | ||
| 517 | shutil.move(lockedsigs_pruned, lockedsigs_base) | ||
| 518 | oe.copy_buildsystem.merge_lockedsigs([], | ||
| 519 | lockedsigs_base, | ||
| 520 | lockedsigs_toolchain, | ||
| 521 | lockedsigs_pruned) | ||
| 522 | oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain, | ||
| 523 | d.getVar('SSTATE_DIR'), | ||
| 524 | sstate_out, d, | ||
| 525 | fixedlsbstring, | ||
| 526 | filterfile=tasklistfn) | ||
| 527 | |||
| 528 | if sdk_ext_type == 'minimal': | ||
| 529 | if derivative: | ||
| 530 | # Assume the user is not going to set up an additional sstate | ||
| 531 | # mirror, thus we need to copy the additional artifacts (from | ||
| 532 | # workspace recipes) into the derivative SDK | ||
| 533 | lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc' | ||
| 534 | if os.path.exists(lockedsigs_orig): | ||
| 535 | lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc' | ||
| 536 | oe.copy_buildsystem.merge_lockedsigs(None, | ||
| 537 | lockedsigs_orig, | ||
| 538 | lockedsigs_pruned, | ||
| 539 | None, | ||
| 540 | lockedsigs_extra) | ||
| 541 | oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra, | ||
| 542 | d.getVar('SSTATE_DIR'), | ||
| 543 | sstate_out, d, | ||
| 544 | fixedlsbstring, | ||
| 545 | filterfile=tasklistfn) | ||
| 546 | else: | ||
| 547 | oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned, | ||
| 548 | d.getVar('SSTATE_DIR'), | ||
| 549 | sstate_out, d, | ||
| 550 | fixedlsbstring, | ||
| 551 | filterfile=tasklistfn) | ||
| 552 | |||
| 553 | # We don't need sstate do_package files | ||
| 554 | for root, dirs, files in os.walk(sstate_out): | ||
| 555 | for name in files: | ||
| 556 | if name.endswith("_package.tar.zst"): | ||
| 557 | f = os.path.join(root, name) | ||
| 558 | os.remove(f) | ||
| 559 | |||
| 560 | def write_manifest(d, baseoutpath): | ||
| 561 | import glob | ||
| 562 | |||
| 563 | # Write manifest file | ||
| 564 | # Note: at the moment we cannot include the env setup script here to keep | ||
| 565 | # it updated, since it gets modified during SDK installation (see | ||
| 566 | # sdk_ext_postinst() below) thus the checksum we take here would always | ||
| 567 | # be different. | ||
| 568 | manifest_file_list = ['conf/*'] | ||
| 569 | if d.getVar('BBMULTICONFIG') is not None: | ||
| 570 | manifest_file_list.append('conf/multiconfig/*') | ||
| 571 | |||
| 572 | esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split() | ||
| 573 | esdk_manifest_excludes_list = [] | ||
| 574 | for exclude_item in esdk_manifest_excludes: | ||
| 575 | esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item)) | ||
| 576 | manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest') | ||
| 577 | with open(manifest_file, 'w') as f: | ||
| 578 | for item in manifest_file_list: | ||
| 579 | for fn in glob.glob(os.path.join(baseoutpath, item)): | ||
| 580 | if fn == manifest_file or os.path.isdir(fn): | ||
| 581 | continue | ||
| 582 | if fn in esdk_manifest_excludes_list: | ||
| 583 | continue | ||
| 584 | chksum = bb.utils.sha256_file(fn) | ||
| 585 | f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath))) | ||
| 586 | |||
| 587 | |||
| 588 | python copy_buildsystem () { | ||
| 589 | import oe.copy_buildsystem | ||
| 590 | |||
| 591 | baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH') | ||
| 592 | |||
| 593 | # Determine if we're building a derivative extensible SDK (from devtool build-sdk) | ||
| 594 | derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1' | ||
| 595 | |||
| 596 | conf_initpath, conf_bbpath, core_meta_subdir, sdkbblayers = copy_bitbake_and_layers(d, baseoutpath, derivative) | ||
| 597 | |||
| 598 | write_devtool_config(d, baseoutpath, conf_bbpath, conf_initpath, core_meta_subdir) | ||
| 599 | |||
| 600 | write_unlocked_sigs(d, baseoutpath) | ||
| 601 | |||
| 602 | write_bblayers_conf(d, baseoutpath, sdkbblayers) | ||
| 603 | |||
| 604 | uninative_checksum = copy_uninative(d, baseoutpath) | ||
| 605 | |||
| 606 | write_local_conf(d, baseoutpath, derivative, core_meta_subdir, uninative_checksum) | ||
| 607 | |||
| 608 | prepare_locked_cache(d, baseoutpath, derivative, conf_initpath) | ||
| 609 | |||
| 610 | write_manifest(d, baseoutpath) | ||
| 611 | |||
| 612 | } | ||
| 613 | |||
| 614 | def get_current_buildtools(d): | ||
| 615 | """Get the file name of the current buildtools installer""" | ||
| 616 | import glob | ||
| 617 | btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh')) | ||
| 618 | btfiles.sort(key=os.path.getctime) | ||
| 619 | return os.path.basename(btfiles[-1]) | ||
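| | # e.g. this would return a name like | ||
| | # "x86_64-buildtools-nativesdk-standalone-5.0.sh" (version illustrative) | ||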
| 620 | |||
| 621 | def get_sdk_required_utilities(buildtools_fn, d): | ||
| 622 | """Find required utilities that aren't provided by the buildtools""" | ||
| 623 | sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split() | ||
| 624 | sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc')) | ||
| 625 | sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++')) | ||
| 626 | if buildtools_fn: | ||
| 627 | buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn) | ||
| 628 | filelist, _ = bb.process.run('%s -l' % buildtools_installer) | ||
| 629 | else: | ||
| 630 | buildtools_installer = None | ||
| 631 | filelist = "" | ||
| 632 | localdata = bb.data.createCopy(d) | ||
| 633 | localdata.setVar('SDKPATH', '.') | ||
| 634 | sdkpathnative = localdata.getVar('SDKPATHNATIVE') | ||
| 635 | sdkbindirs = [localdata.getVar('bindir_nativesdk'), | ||
| 636 | localdata.getVar('sbindir_nativesdk'), | ||
| 637 | localdata.getVar('base_bindir_nativesdk'), | ||
| 638 | localdata.getVar('base_sbindir_nativesdk')] | ||
| 639 | for line in filelist.splitlines(): | ||
| 640 | splitline = line.split() | ||
| 641 | if len(splitline) > 5: | ||
| 642 | fn = splitline[5] | ||
| 643 | if not fn.startswith('./'): | ||
| 644 | fn = './%s' % fn | ||
| 645 | if fn.startswith(sdkpathnative): | ||
| 646 | relpth = '/' + os.path.relpath(fn, sdkpathnative) | ||
| 647 | for bindir in sdkbindirs: | ||
| 648 | if relpth.startswith(bindir): | ||
| 649 | relpth = os.path.relpath(relpth, bindir) | ||
| 650 | if relpth in sanity_required_utilities: | ||
| 651 | sanity_required_utilities.remove(relpth) | ||
| 652 | break | ||
| 653 | return ' '.join(sanity_required_utilities) | ||
| 654 | |||
| 655 | install_tools() { | ||
| 656 | touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase | ||
| 657 | |||
| 658 | # find latest buildtools-tarball and install it | ||
| 659 | if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then | ||
| 660 | install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH} | ||
| 661 | fi | ||
| 662 | |||
| 663 | install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH} | ||
| 664 | } | ||
| 665 | do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True" | ||
| 666 | |||
| 667 | sdk_ext_preinst() { | ||
| 668 | # Since bitbake won't run as root, it doesn't make sense to try to install | ||
| 669 | # the extensible SDK as root. | ||
| 670 | if [ "`id -u`" = "0" ]; then | ||
| 671 | echo "ERROR: The extensible sdk cannot be installed as root." | ||
| 672 | exit 1 | ||
| 673 | fi | ||
| 674 | if ! command -v locale > /dev/null; then | ||
| 675 | echo "ERROR: The installer requires the locale command, please install it first" | ||
| 676 | exit 1 | ||
| 677 | fi | ||
| 678 | # Check setting of LC_ALL set above | ||
| 679 | canonicalised_locale=`echo $LC_ALL | sed 's/UTF-8/utf8/'` | ||
| 680 | if ! locale -a | grep -q $canonicalised_locale ; then | ||
| 681 | echo "ERROR: the installer requires the $LC_ALL locale to be installed (but not selected), please install it first" | ||
| 682 | exit 1 | ||
| 683 | fi | ||
| 684 | # The relocation script used by buildtools installer requires python | ||
| 685 | if ! command -v python3 > /dev/null; then | ||
| 686 | echo "ERROR: The installer requires python3, please install it first" | ||
| 687 | exit 1 | ||
| 688 | fi | ||
| 689 | missing_utils="" | ||
| 690 | for util in ${SDK_REQUIRED_UTILITIES}; do | ||
| 691 | if ! command -v $util > /dev/null; then | ||
| 692 | missing_utils="$missing_utils $util" | ||
| 693 | fi | ||
| 694 | done | ||
| 695 | if [ -n "$missing_utils" ] ; then | ||
| 696 | echo "ERROR: the SDK requires the following missing utilities, please install them: $missing_utils" | ||
| 697 | exit 1 | ||
| 698 | fi | ||
| 699 | SDK_EXTENSIBLE="1" | ||
| 700 | if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then | ||
| 701 | EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache" | ||
| 702 | fi | ||
| 703 | } | ||
| 704 | SDK_PRE_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_preinst}" | ||
| 705 | |||
| 706 | # FIXME this preparation should be done as part of the SDK construction | ||
| 707 | sdk_ext_postinst() { | ||
| 708 | printf "\nExtracting buildtools...\n" | ||
| 709 | cd $target_sdk_dir | ||
| 710 | env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}" | ||
| 711 | if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then | ||
| 712 | printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; } | ||
| 713 | |||
| 714 | # Delete the buildtools tar file since it won't be used again | ||
| 715 | rm -f ./${SDK_BUILDTOOLS_INSTALLER} | ||
| 716 | # We don't need the log either since it succeeded | ||
| 717 | rm -f buildtools.log | ||
| 718 | |||
| 719 | # Make sure when the user sets up the environment, they also get | ||
| 720 | # the buildtools-tarball tools in their path. | ||
| 721 | echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script | ||
| 722 | echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script | ||
| 723 | echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script | ||
| 724 | echo "export OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script | ||
| 725 | fi | ||
| 726 | |||
| 727 | # Allow the bitbake environment setup to be run as part of this SDK. | ||
| 728 | echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script | ||
| 729 | # Work around runqemu not knowing how to get this information within the eSDK | ||
| 730 | echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script | ||
| 731 | |||
| 732 | # Another slight hack: we need this on the path only for devtool, | ||
| 733 | # so put it at the end of $PATH. | ||
| 734 | echo "export PATH=\"$target_sdk_dir/${esdk_tools_path}:\$PATH\"" >> $env_setup_script | ||
| 735 | |||
| 736 | echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script | ||
| 737 | |||
| 738 | # Warn if trying to use external bitbake and the ext SDK together | ||
| 739 | echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script | ||
| 740 | |||
| 741 | if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then | ||
| 742 | printf "Preparing build system...\n" | ||
| 743 | # dash, which is /bin/sh on Ubuntu, will not preserve the | ||
| 744 | # current working directory when first run, nor will it set $1 when | ||
| 745 | # sourcing a script. That is why this has to look so ugly. | ||
| 746 | LOGFILE="$target_sdk_dir/preparing_build_system.log" | ||
| 747 | sh -c ". buildtools/environment-setup* > $LOGFILE 2>&1 && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE 2>&1 && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; } | ||
| 748 | fi | ||
| 749 | if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then | ||
| 750 | rm $target_sdk_dir/ext-sdk-prepare.py | ||
| 751 | fi | ||
| 752 | echo done | ||
| 753 | } | ||
| 754 | |||
| 755 | SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}" | ||
| 756 | |||
| 757 | SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem install_tools " | ||
| 758 | |||
| 759 | SDK_INSTALL_TARGETS = "" | ||
| 760 | fakeroot python do_populate_sdk_ext() { | ||
| 761 | # FIXME hopefully we can remove this restriction at some point, but uninative | ||
| 762 | # currently forces this upon us | ||
| 763 | if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'): | ||
| 764 | bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH'))) | ||
| 765 | |||
| 766 | # FIXME hopefully we can remove this restriction at some point, but the eSDK | ||
| 767 | # can only be built for the primary (default) multiconfig | ||
| 768 | if d.getVar('BB_CURRENT_MC') != '': | ||
| 769 | bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC')) | ||
| 770 | |||
| 771 | # eSDK dependencies don't use the traditional variables and things don't work properly if they are set | ||
| 772 | d.setVar("TOOLCHAIN_HOST_TASK", "${TOOLCHAIN_HOST_TASK_ESDK}") | ||
| 773 | d.setVar("TOOLCHAIN_TARGET_TASK", "") | ||
| 774 | |||
| 775 | d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d)) | ||
| 776 | if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1': | ||
| 777 | buildtools_fn = get_current_buildtools(d) | ||
| 778 | else: | ||
| 779 | buildtools_fn = None | ||
| 780 | d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d)) | ||
| 781 | d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn) | ||
| 782 | d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}') | ||
| 783 | # ESDKs have a libc from the buildtools so ensure we don't ship linguas twice | ||
| 784 | d.delVar('SDKIMAGE_LINGUAS') | ||
| 785 | if d.getVar("SDK_INCLUDE_NATIVESDK") == '1': | ||
| 786 | generate_nativesdk_lockedsigs(d) | ||
| 787 | populate_sdk_common(d) | ||
| 788 | } | ||
| 789 | |||
| 790 | def generate_nativesdk_lockedsigs(d): | ||
| 791 | import oe.copy_buildsystem | ||
| 792 | sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc' | ||
| 793 | oe.copy_buildsystem.generate_locked_sigs(sigfile, d) | ||
| 794 | |||
| 795 | def get_ext_sdk_depends(d): | ||
| 796 | # Note: the deps varflag is a list not a string, so we need to specify expand=False | ||
| 797 | deps = d.getVarFlag('do_image_complete', 'deps', False) | ||
| 798 | pn = d.getVar('PN') | ||
| 799 | deplist = ['%s:%s' % (pn, dep) for dep in deps] | ||
| 800 | tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d) | ||
| 801 | tasklist.append('do_rootfs') | ||
| 802 | for task in tasklist: | ||
| 803 | deplist.extend((d.getVarFlag(task, 'depends') or '').split()) | ||
| 804 | return ' '.join(deplist) | ||
| 805 | |||
| 806 | python do_sdk_depends() { | ||
| 807 | # We have to do this separately in its own task so we avoid recursing into | ||
| 808 | # dependencies we don't need to (e.g. buildtools-tarball) and bringing those | ||
| 809 | # into the SDK's sstate-cache | ||
| 810 | import oe.copy_buildsystem | ||
| 811 | sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc' | ||
| 812 | oe.copy_buildsystem.generate_locked_sigs(sigfile, d) | ||
| 813 | } | ||
| 814 | addtask sdk_depends | ||
| 815 | |||
| 816 | do_sdk_depends[dirs] = "${WORKDIR}" | ||
| 817 | do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot" | ||
| 818 | do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}" | ||
| 819 | do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}" | ||
| 820 | do_sdk_depends[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('TOOLCHAIN_HOST_TASK_ESDK').split()])}" | ||
| 821 | |||
| 822 | do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}" | ||
| 823 | |||
| 824 | do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \ | ||
| 825 | ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \ | ||
| 826 | ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \ | ||
| 827 | ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}" | ||
| 828 | |||
| 829 | # We must avoid depending on do_build here if rm_work.bbclass is active, | ||
| 830 | # because otherwise do_rm_work may run before do_populate_sdk_ext itself. | ||
| 831 | # We can't mark do_populate_sdk_ext and do_sdk_depends as having to | ||
| 832 | # run before do_rm_work, because then they would also run as part | ||
| 833 | # of normal builds. | ||
| 834 | do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}" | ||
| 835 | |||
| 836 | # Make sure code changes can result in rebuild | ||
| 837 | do_populate_sdk_ext[vardeps] += "copy_buildsystem \ | ||
| 838 | sdk_ext_postinst" | ||
| 839 | |||
| 840 | # Since any change in the metadata of any layer should cause a rebuild of the | ||
| 841 | # sdk (since the layers are put in the sdk), set the task to nostamp so it | ||
| 842 | # always runs. | ||
| 843 | do_populate_sdk_ext[nostamp] = "1" | ||
| 844 | |||
| 845 | SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext" | ||
| 846 | |||
| 847 | SSTATETASKS += "do_populate_sdk_ext" | ||
| 848 | SSTATE_SKIP_CREATION:task-populate-sdk-ext = '1' | ||
| 849 | do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}" | ||
| 850 | do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}" | ||
| 851 | do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}" | ||
| 852 | do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}" | ||
| 853 | |||
| 854 | addtask populate_sdk_ext after do_sdk_depends | ||
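For context, the extensible SDK produced by this class is built with a dedicated task rather than as part of a normal image build; a minimal usage sketch (the image name is illustrative):

    # Build the eSDK installer; the self-extracting *.sh ends up in ${SDK_DEPLOY}
    bitbake core-image-minimal -c populate_sdk_ext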
diff --git a/meta/classes-recipe/ptest-cargo.bbclass b/meta/classes-recipe/ptest-cargo.bbclass deleted file mode 100644 index 30463e117b..0000000000 --- a/meta/classes-recipe/ptest-cargo.bbclass +++ /dev/null | |||
| @@ -1,152 +0,0 @@ | |||
| 1 | inherit cargo ptest | ||
| 2 | |||
| 3 | RUST_TEST_ARGS ??= "" | ||
| 4 | RUST_TEST_ARGS[doc] = "Arguments to give to the test binaries (e.g. --shuffle)" | ||
| 5 | |||
| 6 | # I didn't find a cleaner way to share data between compile and install tasks | ||
| 7 | CARGO_TEST_BINARIES_FILES ?= "${B}/test_binaries_list" | ||
| 8 | |||
| 9 | # Sadly, generated test binaries have no deterministic names (https://github.com/rust-lang/cargo/issues/1924) | ||
| 10 | # This forces us to parse the cargo output in JSON format to find those test binaries. | ||
| 11 | python do_compile_ptest_cargo() { | ||
| 12 | import subprocess | ||
| 13 | import json | ||
| 14 | |||
| 15 | cargo = bb.utils.which(d.getVar("PATH"), d.getVar("CARGO")) | ||
| 16 | cargo_build_flags = d.getVar("CARGO_BUILD_FLAGS") | ||
| 17 | packageconfig_confargs = d.getVar("PACKAGECONFIG_CONFARGS") | ||
| 18 | rust_flags = d.getVar("RUSTFLAGS") | ||
| 19 | manifest_path = d.getVar("CARGO_MANIFEST_PATH") | ||
| 20 | project_manifest_path = os.path.normpath(manifest_path) | ||
| 21 | manifest_dir = os.path.dirname(manifest_path) | ||
| 22 | |||
| 23 | env = os.environ.copy() | ||
| 24 | env['RUSTFLAGS'] = rust_flags | ||
| 25 | cmd = f"{cargo} build --tests --message-format json {cargo_build_flags} {packageconfig_confargs}" | ||
| 26 | bb.note(f"Building tests with cargo ({cmd})") | ||
| 27 | |||
| 28 | try: | ||
| 29 | proc = subprocess.Popen(cmd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) | ||
| 30 | except OSError as e: # Popen() raises OSError on spawn failure, not CalledProcessError | ||
| 31 | bb.fatal(f"Cannot build test with cargo: {e}") | ||
| 32 | |||
| 33 | lines = [] | ||
| 34 | for line in proc.stdout: | ||
| 35 | data = line.strip('\n') | ||
| 36 | lines.append(data) | ||
| 37 | bb.note(data) | ||
| 38 | proc.communicate() | ||
| 39 | if proc.returncode != 0: | ||
| 40 | bb.fatal(f"Unable to compile test with cargo, '{cmd}' failed") | ||
| 41 | |||
| 42 | # Definition of the format: https://doc.rust-lang.org/cargo/reference/external-tools.html#json-messages | ||
| 43 | test_bins = [] | ||
| 44 | for line in lines: | ||
| 45 | try: | ||
| 46 | data = json.loads(line) | ||
| 47 | except json.JSONDecodeError: | ||
| 48 | # skip lines that are not valid JSON | ||
| 49 | pass | ||
| 50 | else: | ||
| 51 | try: | ||
| 52 | # Filter the test packages coming from the current project: | ||
| 53 | # - test binaries from the root manifest | ||
| 54 | # - test binaries from sub manifest of the current project if any | ||
| 55 | current_manifest_path = os.path.normpath(data['manifest_path']) | ||
| 56 | common_path = os.path.commonpath([current_manifest_path, project_manifest_path]) | ||
| 57 | if common_path in [manifest_dir, current_manifest_path]: | ||
| 58 | if (data['target']['test'] or data['target']['doctest']) and data['executable']: | ||
| 59 | test_bins.append(data['executable']) | ||
| 60 | except (KeyError, ValueError) as e: | ||
| 61 | # skip lines that do not meet the requirements | ||
| 62 | pass | ||
| 63 | |||
| 64 | # Every Rust project generates at least one unit test binary | ||
| 65 | # (it will just run a test suite with 0 tests if the project didn't define any), | ||
| 66 | # so the list is not expected to be empty here | ||
| 67 | if not test_bins: | ||
| 68 | bb.fatal("Unable to find any test binaries") | ||
| 69 | |||
| 70 | cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES') | ||
| 71 | bb.note(f"Found {len(test_bins)} tests, write their paths into {cargo_test_binaries_file}") | ||
| 72 | with open(cargo_test_binaries_file, "w") as f: | ||
| 73 | for test_bin in sorted(test_bins): | ||
| 74 | f.write(f"{test_bin}\n") | ||
| 75 | |||
| 76 | } | ||
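To make the filtering above concrete, a trimmed compiler-artifact record from `cargo build --message-format json` looks roughly like this (paths and hash suffix are hypothetical); the parser only consults manifest_path, target.test/target.doctest and executable:

    {"reason": "compiler-artifact",
     "manifest_path": "/work/mycrate/Cargo.toml",
     "target": {"name": "mycrate", "test": true, "doctest": false},
     "executable": "/work/mycrate/target/debug/deps/mycrate-0a1b2c3d"}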
| 77 | |||
| 78 | python do_install_ptest_cargo() { | ||
| 79 | import shutil | ||
| 80 | import textwrap | ||
| 81 | |||
| 82 | dest_dir = d.getVar("D") | ||
| 83 | pn = d.getVar("PN") | ||
| 84 | ptest_path = d.getVar("PTEST_PATH") | ||
| 85 | cargo_test_binaries_file = d.getVar('CARGO_TEST_BINARIES_FILES') | ||
| 86 | rust_test_args = d.getVar('RUST_TEST_ARGS') or "" | ||
| 87 | |||
| 88 | ptest_dir = os.path.join(dest_dir, ptest_path.lstrip('/')) | ||
| 89 | os.makedirs(ptest_dir, exist_ok=True) | ||
| 90 | |||
| 91 | test_bins = [] | ||
| 92 | with open(cargo_test_binaries_file, "r") as f: | ||
| 93 | for line in f.readlines(): | ||
| 94 | test_bins.append(line.strip('\n')) | ||
| 95 | |||
| 96 | test_paths = [] | ||
| 97 | for test_bin in test_bins: | ||
| 98 | shutil.copy2(test_bin, ptest_dir) | ||
| 99 | test_paths.append(os.path.join(ptest_path, os.path.basename(test_bin))) | ||
| 100 | |||
| 101 | ptest_script = os.path.join(ptest_dir, "run-ptest") | ||
| 102 | script_exists = os.path.exists(ptest_script) | ||
| 103 | with open(ptest_script, "a") as f: | ||
| 104 | if not script_exists: | ||
| 105 | f.write("#!/bin/sh\n") | ||
| 106 | else: | ||
| 107 | f.write(f"\necho \"\"\n") | ||
| 108 | f.write(f"echo \"## starting to run rust tests ##\"\n") | ||
| 109 | f.write("if [ -z \"$rc\" ]; then rc=0; fi\n") | ||
| 110 | for test_path in test_paths: | ||
| 111 | script = textwrap.dedent(f"""\ | ||
| 112 | if ! {test_path} {rust_test_args} | ||
| 113 | then | ||
| 114 | rc=1 | ||
| 115 | echo "FAIL: {test_path}" | ||
| 116 | else | ||
| 117 | echo "PASS: {test_path}" | ||
| 118 | fi | ||
| 119 | """) | ||
| 120 | f.write(script) | ||
| 121 | |||
| 122 | f.write("exit $rc\n") | ||
| 123 | |||
| 124 | if not script_exists: | ||
| 125 | os.chmod(ptest_script, 0o755) | ||
| 126 | |||
| 127 | # this is chown -R root:root ${D}${PTEST_PATH} | ||
| 128 | for root, dirs, files in os.walk(ptest_dir): | ||
| 129 | for dname in dirs: | ||
| 130 | shutil.chown(os.path.join(root, dname), "root", "root") | ||
| 131 | for fname in files: | ||
| 132 | shutil.chown(os.path.join(root, fname), "root", "root") | ||
| 133 | } | ||
| 134 | |||
| 135 | do_install_ptest_cargo[dirs] = "${B}" | ||
| 136 | do_install_ptest_cargo[doc] = "Create or update the run-ptest script with the generated Rust test binaries" | ||
| 137 | do_compile_ptest_cargo[dirs] = "${B}" | ||
| 138 | do_compile_ptest_cargo[doc] = "Generate Rust test binaries through cargo" | ||
| 139 | |||
| 140 | addtask compile_ptest_cargo after do_compile before do_compile_ptest_base | ||
| 141 | addtask install_ptest_cargo after do_install_ptest_base before do_package | ||
| 142 | |||
| 143 | python () { | ||
| 144 | if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): | ||
| 145 | d.setVarFlag('do_install_ptest_cargo', 'fakeroot', '1') | ||
| 146 | d.setVarFlag('do_install_ptest_cargo', 'umask', '022') | ||
| 147 | |||
| 148 | # Remove all '*ptest_cargo' tasks when ptest is not enabled | ||
| 149 | if d.getVar('PTEST_ENABLED') != "1": | ||
| 150 | for i in ['do_compile_ptest_cargo', 'do_install_ptest_cargo']: | ||
| 151 | bb.build.deltask(i, d) | ||
| 152 | } | ||
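A hedged sketch of recipe-side usage (the argument value is illustrative; --test-threads is a standard libtest flag):

    inherit ptest-cargo
    # serialize test execution on small targets
    RUST_TEST_ARGS = "--test-threads=1"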
diff --git a/meta/classes-recipe/ptest-gnome.bbclass b/meta/classes-recipe/ptest-gnome.bbclass deleted file mode 100644 index d4ad22d85d..0000000000 --- a/meta/classes-recipe/ptest-gnome.bbclass +++ /dev/null | |||
| @@ -1,14 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit ptest | ||
| 8 | |||
| 9 | EXTRA_OECONF:append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}" | ||
| 10 | |||
| 11 | FILES:${PN}-ptest += "${libexecdir}/installed-tests/ \ | ||
| 12 | ${datadir}/installed-tests/" | ||
| 13 | |||
| 14 | RDEPENDS:${PN}-ptest += "gnome-desktop-testing" | ||
diff --git a/meta/classes-recipe/ptest-perl.bbclass b/meta/classes-recipe/ptest-perl.bbclass deleted file mode 100644 index a4a9d40d52..0000000000 --- a/meta/classes-recipe/ptest-perl.bbclass +++ /dev/null | |||
| @@ -1,36 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit ptest | ||
| 8 | |||
| 9 | FILESEXTRAPATHS:prepend := "${COREBASE}/meta/files:" | ||
| 10 | |||
| 11 | SRC_URI += "file://ptest-perl/run-ptest" | ||
| 12 | |||
| 13 | do_install_ptest_perl() { | ||
| 14 | install -d ${D}${PTEST_PATH} | ||
| 15 | if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then | ||
| 16 | install -m 0755 ${UNPACKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH} | ||
| 17 | fi | ||
| 18 | cp -r ${B}/t ${D}${PTEST_PATH} | ||
| 19 | chown -R root:root ${D}${PTEST_PATH} | ||
| 20 | } | ||
| 21 | |||
| 22 | FILES:${PN}-ptest:prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest " | ||
| 23 | |||
| 24 | RDEPENDS:${PN}-ptest:prepend = "perl " | ||
| 25 | |||
| 26 | addtask install_ptest_perl after do_install_ptest_base before do_package | ||
| 27 | |||
| 28 | python () { | ||
| 29 | if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): | ||
| 30 | d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1') | ||
| 31 | |||
| 32 | # Remove all '*ptest_perl' tasks when ptest is not enabled | ||
| 33 | if d.getVar('PTEST_ENABLED') != "1": | ||
| 34 | for i in ['do_install_ptest_perl']: | ||
| 35 | bb.build.deltask(i, d) | ||
| 36 | } | ||
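A minimal sketch of a consuming recipe, assuming a CPAN-style module whose tests live under t/ in ${B}:

    inherit cpan ptest-perl
    # the default run-ptest shipped in meta/files is installed unless the recipe provides its own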
diff --git a/meta/classes-recipe/ptest-python-pytest.bbclass b/meta/classes-recipe/ptest-python-pytest.bbclass deleted file mode 100644 index a4615e12bf..0000000000 --- a/meta/classes-recipe/ptest-python-pytest.bbclass +++ /dev/null | |||
| @@ -1,37 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit ptest | ||
| 8 | |||
| 9 | # Overridable configuration for the directory within the source tree | ||
| 10 | # containing the pytest files | ||
| 11 | PTEST_PYTEST_DIR ?= "tests" | ||
| 12 | |||
| 13 | do_install_ptest() { | ||
| 14 | # Check if the recipe provides its own version of run-ptest | ||
| 15 | # If nothing exists in the SRC_URI, dynamically create a | ||
| 16 | # run-ptest script of "last resort" that has the default | ||
| 17 | # pytest behavior. | ||
| 18 | # | ||
| 19 | # Users can override this behavior by simply including a | ||
| 20 | # custom script (run-ptest) in the source file list | ||
| 21 | if [ ! -f "${UNPACKDIR}/run-ptest" ]; then | ||
| 22 | cat > ${D}${PTEST_PATH}/run-ptest << EOF | ||
| 23 | #!/bin/sh | ||
| 24 | pytest --automake | ||
| 25 | EOF | ||
| 26 | # Ensure the newly created script has the execute bit set | ||
| 27 | chmod 755 ${D}${PTEST_PATH}/run-ptest | ||
| 28 | fi | ||
| 29 | if [ -d "${S}/${PTEST_PYTEST_DIR}" ]; then | ||
| 30 | install -d ${D}${PTEST_PATH}/${PTEST_PYTEST_DIR} | ||
| 31 | cp -rf ${S}/${PTEST_PYTEST_DIR}/* ${D}${PTEST_PATH}/${PTEST_PYTEST_DIR}/ | ||
| 32 | fi | ||
| 33 | } | ||
| 34 | |||
| 35 | FILES:${PN}-ptest:prepend = "${PTEST_PATH}/*" | ||
| 36 | |||
| 37 | RDEPENDS:${PN}-ptest:prepend = "python3-pytest python3-unittest-automake-output " | ||
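A hedged sketch of recipe-side usage; the override is only needed when the tests do not live in the default "tests" directory:

    inherit ptest-python-pytest
    PTEST_PYTEST_DIR = "test"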
diff --git a/meta/classes-recipe/ptest.bbclass b/meta/classes-recipe/ptest.bbclass deleted file mode 100644 index 64c4bb9788..0000000000 --- a/meta/classes-recipe/ptest.bbclass +++ /dev/null | |||
| @@ -1,142 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | SUMMARY:${PN}-ptest ?= "${SUMMARY} - Package test files" | ||
| 8 | DESCRIPTION:${PN}-ptest ?= "${DESCRIPTION} \ | ||
| 9 | This package contains a test directory ${PTEST_PATH} for package test purposes." | ||
| 10 | |||
| 11 | PTEST_PATH ?= "${libdir}/${BPN}/ptest" | ||
| 12 | PTEST_BUILD_HOST_FILES ?= "Makefile" | ||
| 13 | PTEST_BUILD_HOST_PATTERN ?= "" | ||
| 14 | PTEST_PARALLEL_MAKE ?= "${PARALLEL_MAKE}" | ||
| 15 | PTEST_PARALLEL_MAKEINST ?= "${PARALLEL_MAKEINST}" | ||
| 16 | EXTRA_OEMAKE:prepend:task-compile-ptest-base = "${PTEST_PARALLEL_MAKE} " | ||
| 17 | EXTRA_OEMAKE:prepend:task-install-ptest-base = "${PTEST_PARALLEL_MAKEINST} " | ||
| 18 | |||
| 19 | FILES:${PN}-ptest += "${PTEST_PATH}" | ||
| 20 | SECTION:${PN}-ptest = "devel" | ||
| 21 | ALLOW_EMPTY:${PN}-ptest = "1" | ||
| 22 | PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}" | ||
| 23 | PTEST_ENABLED:class-native = "" | ||
| 24 | PTEST_ENABLED:class-nativesdk = "" | ||
| 25 | PTEST_ENABLED:class-cross-canadian = "" | ||
| 26 | RDEPENDS:${PN}-ptest += "${PN}" | ||
| 27 | RDEPENDS:${PN}-ptest:class-native = "" | ||
| 28 | RDEPENDS:${PN}-ptest:class-nativesdk = "" | ||
| 29 | RRECOMMENDS:${PN}-ptest += "ptest-runner" | ||
| 30 | |||
| 31 | PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}" | ||
| 32 | |||
| 33 | require conf/distro/include/ptest-packagelists.inc | ||
| 34 | |||
| 35 | do_configure_ptest() { | ||
| 36 | : | ||
| 37 | } | ||
| 38 | |||
| 39 | do_configure_ptest_base() { | ||
| 40 | do_configure_ptest | ||
| 41 | } | ||
| 42 | |||
| 43 | do_compile_ptest() { | ||
| 44 | : | ||
| 45 | } | ||
| 46 | |||
| 47 | do_compile_ptest_base() { | ||
| 48 | do_compile_ptest | ||
| 49 | } | ||
| 50 | |||
| 51 | do_install_ptest() { | ||
| 52 | : | ||
| 53 | } | ||
| 54 | |||
| 55 | do_install_ptest_base() { | ||
| 56 | if [ -f ${UNPACKDIR}/run-ptest ]; then | ||
| 57 | install -D ${UNPACKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest | ||
| 58 | fi | ||
| 59 | |||
| 60 | grep -q install-ptest: Makefile 2>/dev/null && oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest | ||
| 61 | |||
| 62 | do_install_ptest | ||
| 63 | chown -R root:root ${D}${PTEST_PATH} | ||
| 64 | |||
| 65 | # Strip build host paths from any installed Makefile | ||
| 66 | for filename in ${PTEST_BUILD_HOST_FILES}; do | ||
| 67 | for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do | ||
| 68 | bbnote "Stripping host paths from: $installed_ptest_file" | ||
| 69 | sed -e 's#${HOSTTOOLS_DIR}/*##g' \ | ||
| 70 | -e 's#${WORKDIR}/*=#.=#g' \ | ||
| 71 | -e 's#${WORKDIR}/*##g' \ | ||
| 72 | -i $installed_ptest_file | ||
| 73 | if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then | ||
| 74 | sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \ | ||
| 75 | -i $installed_ptest_file | ||
| 76 | fi | ||
| 77 | done | ||
| 78 | done | ||
| 79 | } | ||
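As a hedged illustration of the scrubbing knobs above (the file name and pattern are hypothetical), a recipe whose installed test Makefiles still leak host-specific lines could extend them like this:

    PTEST_BUILD_HOST_FILES += "config.mk"
    PTEST_BUILD_HOST_PATTERN = "^HOST_CC ="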
| 80 | |||
| 81 | PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin" | ||
| 82 | |||
| 83 | # This function needs to run after apply_update_alternative_renames because the | ||
| 84 | # aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is | ||
| 85 | # used here to make this function run as late as possible. | ||
| 86 | PACKAGE_PREPROCESS_FUNCS:append = "${@bb.utils.contains('PTEST_BINDIR', '1', \ | ||
| 87 | bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}" | ||
| 88 | |||
| 89 | python ptest_update_alternatives() { | ||
| 90 | """ | ||
| 91 | This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH | ||
| 92 | to match the renamed binaries by update-alternatives. | ||
| 93 | """ | ||
| 94 | |||
| 95 | if not bb.data.inherits_class('update-alternatives', d) \ | ||
| 96 | or not update_alternatives_enabled(d): | ||
| 97 | return | ||
| 98 | |||
| 99 | bb.note("Generating symlinks for ptest") | ||
| 100 | bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"), | ||
| 101 | d.getVar("sbindir"), d.getVar("base_sbindir") } | ||
| 102 | ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH") | ||
| 103 | os.mkdir(ptest_bindir) | ||
| 104 | for pkg in (d.getVar('PACKAGES') or "").split(): | ||
| 105 | alternatives = update_alternatives_alt_targets(d, pkg) | ||
| 106 | for alt_name, alt_link, alt_target, _ in alternatives: | ||
| 107 | # Some alternatives are for man pages, | ||
| 108 | # check if the alternative is in PATH | ||
| 109 | if os.path.dirname(alt_link) in bin_paths: | ||
| 110 | os.symlink(alt_target, os.path.join(ptest_bindir, alt_name)) | ||
| 111 | } | ||
| 112 | |||
| 113 | do_configure_ptest_base[dirs] = "${B}" | ||
| 114 | do_compile_ptest_base[dirs] = "${B}" | ||
| 115 | do_install_ptest_base[dirs] = "${B}" | ||
| 116 | do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}" | ||
| 117 | |||
| 118 | addtask configure_ptest_base after do_configure before do_compile | ||
| 119 | addtask compile_ptest_base after do_compile before do_install | ||
| 120 | addtask install_ptest_base after do_install before do_package do_populate_sysroot | ||
| 121 | |||
| 122 | python () { | ||
| 123 | if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): | ||
| 124 | d.setVarFlag('do_install_ptest_base', 'fakeroot', '1') | ||
| 125 | d.setVarFlag('do_install_ptest_base', 'umask', '022') | ||
| 126 | |||
| 127 | # Remove all '*ptest_base' tasks when ptest is not enabled | ||
| 128 | if d.getVar('PTEST_ENABLED') != "1": | ||
| 129 | for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']: | ||
| 130 | bb.build.deltask(i, d) | ||
| 131 | } | ||
| 132 | |||
| 133 | QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest" | ||
| 134 | def package_qa_check_missing_ptest(pn, d): | ||
| 135 | # This checks that the ptest package is actually included | ||
| 136 | # in standard oe-core ptest images - only for oe-core recipes | ||
| 137 | if 'meta/recipes' not in d.getVar('FILE') or d.getVar('PTEST_ENABLED') != "1": | ||
| 138 | return | ||
| 139 | |||
| 140 | enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split() | ||
| 141 | if pn.replace(d.getVar('MLPREFIX'), '') not in enabled_ptests: | ||
| 142 | oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d) | ||
diff --git a/meta/classes-recipe/pypi.bbclass b/meta/classes-recipe/pypi.bbclass deleted file mode 100644 index 1372d85e8d..0000000000 --- a/meta/classes-recipe/pypi.bbclass +++ /dev/null | |||
| @@ -1,57 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | def pypi_package(d): | ||
| 8 | bpn = d.getVar('BPN') | ||
| 9 | if bpn.startswith('python-'): | ||
| 10 | return bpn[7:] | ||
| 11 | elif bpn.startswith('python3-'): | ||
| 12 | return bpn[8:] | ||
| 13 | return bpn | ||
| 14 | |||
| 15 | # The PyPi package name (defaults to PN without the python3- prefix) | ||
| 16 | PYPI_PACKAGE ?= "${@pypi_package(d)}" | ||
| 17 | # The file extension of the source archive | ||
| 18 | PYPI_PACKAGE_EXT ?= "tar.gz" | ||
| 19 | # An optional prefix for the download file in the case of name collisions | ||
| 20 | PYPI_ARCHIVE_NAME_PREFIX ?= "" | ||
| 21 | |||
| 22 | def pypi_src_uri(d): | ||
| 23 | """ | ||
| 24 | Construct a source URL as per https://warehouse.pypa.io/api-reference/integration-guide.html#predictable-urls. | ||
| 25 | """ | ||
| 26 | package = d.getVar('PYPI_PACKAGE') | ||
| 27 | archive_name = d.expand('${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}') | ||
| 28 | archive_downloadname = d.getVar('PYPI_ARCHIVE_NAME_PREFIX') + archive_name | ||
| 29 | return 'https://files.pythonhosted.org/packages/source/%s/%s/%s;downloadfilename=%s' % (package[0], package, archive_name, archive_downloadname) | ||
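As a worked example of the predictable-URL scheme above, with PYPI_PACKAGE = "requests" and PV = "2.31.0" (illustrative values) PYPI_SRC_URI expands to:

    https://files.pythonhosted.org/packages/source/r/requests/requests-2.31.0.tar.gz;downloadfilename=requests-2.31.0.tar.gz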
| 30 | |||
| 31 | def pypi_normalize(d): | ||
| 32 | """" | ||
| 33 | Normalize the package names to match PEP625 (https://peps.python.org/pep-0625/). | ||
| 34 | For non-compliant packages, maintainers can set UPSTREAM_CHECK_PYPI_PACKAGE to override the normalization | ||
| 35 | """ | ||
| 36 | import re | ||
| 37 | return re.sub(r"[-_.]+", "-", d.getVar('PYPI_PACKAGE')).lower() | ||
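For instance (hypothetical input), the normalization maps mixed separators and case to the canonical form PEP 625 expects:

    import re
    print(re.sub(r"[-_.]+", "-", "Sphinx_RTD.Theme").lower())  # -> sphinx-rtd-theme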
| 38 | |||
| 39 | PYPI_SRC_URI ?= "${@pypi_src_uri(d)}" | ||
| 40 | |||
| 41 | HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/" | ||
| 42 | SECTION = "devel/python" | ||
| 43 | SRC_URI:prepend = "${PYPI_SRC_URI} " | ||
| 44 | S = "${UNPACKDIR}/${PYPI_PACKAGE}-${PV}" | ||
| 45 | |||
| 46 | # Normalize the package name (lowercase, with runs of '-', '_' and '.' collapsed to '-') to follow the PyPI naming conventions | ||
| 47 | UPSTREAM_CHECK_PYPI_PACKAGE ?= "${@pypi_normalize(d)}" | ||
| 48 | |||
| 49 | # Use the simple repository API rather than the potentially unstable project URL | ||
| 50 | # More information on the PyPI API specification is available here: | ||
| 51 | # https://packaging.python.org/en/latest/specifications/simple-repository-api/ | ||
| 52 | # | ||
| 53 | # NOTE: All URLs for the simple API MUST request canonical normalized URLs per the spec | ||
| 54 | UPSTREAM_CHECK_URI ?= "https://pypi.org/simple/${@pypi_normalize(d)}/" | ||
| 55 | UPSTREAM_CHECK_REGEX ?= "${UPSTREAM_CHECK_PYPI_PACKAGE}-(?P<pver>(\d+[\.\-_]*)+).(tar\.gz|tgz|zip|tar\.bz2)" | ||
| 56 | |||
| 57 | CVE_PRODUCT ?= "python:${PYPI_PACKAGE}" | ||
diff --git a/meta/classes-recipe/python3-dir.bbclass b/meta/classes-recipe/python3-dir.bbclass deleted file mode 100644 index 0f4e7e7773..0000000000 --- a/meta/classes-recipe/python3-dir.bbclass +++ /dev/null | |||
| @@ -1,11 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | PYTHON_BASEVERSION = "3.13" | ||
| 8 | PYTHON_ABI = "" | ||
| 9 | PYTHON_DIR = "python${PYTHON_BASEVERSION}" | ||
| 10 | PYTHON_PN = "python3" | ||
| 11 | PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages" | ||
diff --git a/meta/classes-recipe/python3native.bbclass b/meta/classes-recipe/python3native.bbclass deleted file mode 100644 index da1283d6b3..0000000000 --- a/meta/classes-recipe/python3native.bbclass +++ /dev/null | |||
| @@ -1,30 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python3-dir | ||
| 8 | |||
| 9 | PYTHON = "${STAGING_BINDIR_NATIVE}/python3-native/python3" | ||
| 10 | EXTRANATIVEPATH += "python3-native" | ||
| 11 | DEPENDS:append = " python3-native " | ||
| 12 | |||
| 13 | # python-config and other scripts are using sysconfig modules | ||
| 14 | # which we patch to access these variables | ||
| 15 | export STAGING_INCDIR | ||
| 16 | export STAGING_LIBDIR | ||
| 17 | |||
| 18 | # Packages can use | ||
| 19 | # find_package(PythonInterp REQUIRED) | ||
| 20 | # find_package(PythonLibs REQUIRED) | ||
| 21 | # which ends up using libs/includes from build host | ||
| 22 | # Therefore pre-empt that effort | ||
| 23 | export PYTHON_LIBRARY = "${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so" | ||
| 24 | export PYTHON_INCLUDE_DIR = "${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}" | ||
| 25 | |||
| 26 | # suppress host user's site-packages dirs. | ||
| 27 | export PYTHONNOUSERSITE = "1" | ||
| 28 | |||
| 29 | # autoconf macros will use their internal default preference otherwise | ||
| 30 | export PYTHON | ||
diff --git a/meta/classes-recipe/python3targetconfig.bbclass b/meta/classes-recipe/python3targetconfig.bbclass deleted file mode 100644 index 08bc619398..0000000000 --- a/meta/classes-recipe/python3targetconfig.bbclass +++ /dev/null | |||
| @@ -1,41 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python3native | ||
| 8 | |||
| 9 | EXTRA_PYTHON_DEPENDS ?= "" | ||
| 10 | EXTRA_PYTHON_DEPENDS:class-target = "python3" | ||
| 11 | DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}" | ||
| 12 | |||
| 13 | setup_target_config() { | ||
| 14 | export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata" | ||
| 15 | export PYTHONPATH=${STAGING_LIBDIR}/python-sysconfigdata:$PYTHONPATH | ||
| 16 | export PATH=${STAGING_EXECPREFIXDIR}/python-target-config/:$PATH | ||
| 17 | } | ||
| 18 | |||
| 19 | do_configure:prepend:class-target() { | ||
| 20 | setup_target_config | ||
| 21 | } | ||
| 22 | |||
| 23 | do_compile:prepend:class-target() { | ||
| 24 | setup_target_config | ||
| 25 | } | ||
| 26 | |||
| 27 | do_install:prepend:class-target() { | ||
| 28 | setup_target_config | ||
| 29 | } | ||
| 30 | |||
| 31 | do_configure:prepend:class-nativesdk() { | ||
| 32 | setup_target_config | ||
| 33 | } | ||
| 34 | |||
| 35 | do_compile:prepend:class-nativesdk() { | ||
| 36 | setup_target_config | ||
| 37 | } | ||
| 38 | |||
| 39 | do_install:prepend:class-nativesdk() { | ||
| 40 | setup_target_config | ||
| 41 | } | ||
diff --git a/meta/classes-recipe/python_flit_core.bbclass b/meta/classes-recipe/python_flit_core.bbclass deleted file mode 100644 index cb40ab0faf..0000000000 --- a/meta/classes-recipe/python_flit_core.bbclass +++ /dev/null | |||
| @@ -1,14 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-flit-core-native" | ||
| 10 | |||
| 11 | python_flit_core_do_manual_build () { | ||
| 12 | cd ${PEP517_SOURCE_PATH} | ||
| 13 | nativepython3 -m flit_core.wheel --outdir ${PEP517_WHEEL_PATH} . | ||
| 14 | } | ||
diff --git a/meta/classes-recipe/python_hatchling.bbclass b/meta/classes-recipe/python_hatchling.bbclass deleted file mode 100644 index 6765ddbe8c..0000000000 --- a/meta/classes-recipe/python_hatchling.bbclass +++ /dev/null | |||
| @@ -1,27 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-hatchling-native" | ||
| 10 | |||
| 11 | # Delete nested, empty directories from the Python site-packages path. Make | ||
| 12 | # sure that we remove the native ones for target builds as well. | ||
| 13 | hatchling_rm_emptydirs:class-target () { | ||
| 14 | find ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/* -depth -type d -empty -delete | ||
| 15 | find ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/* -depth -type d -empty -delete | ||
| 16 | } | ||
| 17 | |||
| 18 | hatchling_rm_emptydirs:class-native () { | ||
| 19 | find ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/* -depth -type d -empty -delete | ||
| 20 | } | ||
| 21 | |||
| 22 | # Define a default empty version of hatchling_rm_emptydirs to appease bitbake | ||
| 23 | hatchling_rm_emptydirs () { | ||
| 24 | : | ||
| 25 | } | ||
| 26 | |||
| 27 | do_prepare_recipe_sysroot[postfuncs] += " hatchling_rm_emptydirs" | ||
diff --git a/meta/classes-recipe/python_maturin.bbclass b/meta/classes-recipe/python_maturin.bbclass deleted file mode 100644 index 5892bf5ecd..0000000000 --- a/meta/classes-recipe/python_maturin.bbclass +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pyo3 python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-maturin-native" | ||
| 10 | |||
| 11 | python_maturin_do_configure() { | ||
| 12 | python_pyo3_do_configure | ||
| 13 | cargo_common_do_configure | ||
| 14 | python_pep517_do_configure | ||
| 15 | } | ||
| 16 | |||
| 17 | EXPORT_FUNCTIONS do_configure | ||
diff --git a/meta/classes-recipe/python_mesonpy.bbclass b/meta/classes-recipe/python_mesonpy.bbclass deleted file mode 100644 index 3613ea19c7..0000000000 --- a/meta/classes-recipe/python_mesonpy.bbclass +++ /dev/null | |||
| @@ -1,46 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit meson python_pep517 | ||
| 8 | |||
| 9 | # meson_do_qa_configure does the wrong thing here because | ||
| 10 | # mesonpy runs "meson setup ..." in do_compile context. | ||
| 11 | # Make it a dummy function. | ||
| 12 | meson_do_qa_configure () { | ||
| 13 | : | ||
| 14 | } | ||
| 15 | |||
| 16 | # This prevents the meson error: | ||
| 17 | # ERROR: Got argument buildtype as both -Dbuildtype and --buildtype. Pick one. | ||
| 18 | MESONOPTS:remove = "--buildtype ${MESON_BUILDTYPE}" | ||
| 19 | |||
| 20 | DEPENDS += "python3-wheel-native python3-meson-python-native" | ||
| 21 | |||
| 22 | def mesonpy_get_args(d): | ||
| 23 | vars = ['MESONOPTS', 'MESON_CROSS_FILE', 'EXTRA_OEMESON'] | ||
| 24 | varlist = [] | ||
| 25 | for var in vars: | ||
| 26 | value = d.getVar(var) | ||
| 27 | vallist = value.split() | ||
| 28 | for elem in vallist: | ||
| 29 | varlist.append("-Csetup-args=" + elem) | ||
| 30 | return ' '.join(varlist) | ||
| 31 | |||
| 32 | PEP517_BUILD_OPTS = "-Cbuilddir='${B}' ${@mesonpy_get_args(d)}" | ||
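For illustration (a hypothetical recipe setting), an option passed via EXTRA_OEMESON is forwarded to meson-python's internal 'meson setup' invocation:

    EXTRA_OEMESON = "-Dtests=false"
    # -> mesonpy_get_args contributes: -Csetup-args=-Dtests=false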
| 33 | |||
| 34 | python_mesonpy_do_configure () { | ||
| 35 | python_pep517_do_configure | ||
| 36 | } | ||
| 37 | |||
| 38 | python_mesonpy_do_compile () { | ||
| 39 | python_pep517_do_compile | ||
| 40 | } | ||
| 41 | |||
| 42 | python_mesonpy_do_install () { | ||
| 43 | python_pep517_do_install | ||
| 44 | } | ||
| 45 | |||
| 46 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
diff --git a/meta/classes-recipe/python_pdm.bbclass b/meta/classes-recipe/python_pdm.bbclass deleted file mode 100644 index 9a34d98422..0000000000 --- a/meta/classes-recipe/python_pdm.bbclass +++ /dev/null | |||
| @@ -1,9 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-pdm-backend-native" | ||
diff --git a/meta/classes-recipe/python_pep517.bbclass b/meta/classes-recipe/python_pep517.bbclass deleted file mode 100644 index 128c943794..0000000000 --- a/meta/classes-recipe/python_pep517.bbclass +++ /dev/null | |||
| @@ -1,68 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Common infrastructure for Python packages that use PEP-517 compliant packaging. | ||
| 8 | # https://www.python.org/dev/peps/pep-0517/ | ||
| 9 | # | ||
| 10 | # This class will build a wheel in do_compile, and use pypa/installer to install | ||
| 11 | # it in do_install. | ||
| 12 | |||
| 13 | inherit python3native python3-dir setuptools3-base | ||
| 14 | |||
| 15 | DEPENDS:append = " python3-build-native python3-installer-native" | ||
| 16 | |||
| 17 | # Where to execute the build process from | ||
| 18 | PEP517_SOURCE_PATH ?= "${S}" | ||
| 19 | |||
| 20 | # The directory where wheels will be written | ||
| 21 | PEP517_WHEEL_PATH ?= "${WORKDIR}/dist" | ||
| 22 | |||
| 23 | # Other options to pass to build | ||
| 24 | PEP517_BUILD_OPTS ?= "" | ||
| 25 | |||
| 26 | # The interpreter to use for installed scripts | ||
| 27 | PEP517_INSTALL_PYTHON = "python3" | ||
| 28 | PEP517_INSTALL_PYTHON:class-native = "nativepython3" | ||
| 29 | |||
| 30 | # pypa/installer option to control the bytecode compilation | ||
| 31 | INSTALL_WHEEL_COMPILE_BYTECODE ?= "--compile-bytecode=0" | ||
| 32 | |||
| 33 | # PEP517 doesn't have a specific configure step, so set an empty do_configure to avoid | ||
| 34 | # running base_do_configure. | ||
| 35 | python_pep517_do_configure () { | ||
| 36 | : | ||
| 37 | } | ||
| 38 | |||
| 39 | # When we have Python 3.11 we can parse pyproject.toml to determine the build | ||
| 40 | # API entry point directly | ||
| 41 | python_pep517_do_compile () { | ||
| 42 | pyproject-build --no-isolation --wheel --outdir ${PEP517_WHEEL_PATH} ${PEP517_SOURCE_PATH} ${PEP517_BUILD_OPTS} | ||
| 43 | } | ||
| 44 | do_compile[cleandirs] += "${PEP517_WHEEL_PATH}" | ||
| 45 | |||
| 46 | python_pep517_do_install () { | ||
| 47 | COUNT=$(find ${PEP517_WHEEL_PATH} -maxdepth 1 -name '*.whl' | wc -l) | ||
| 48 | if test $COUNT -eq 0; then | ||
| 49 | bbfatal No wheels found in ${PEP517_WHEEL_PATH} | ||
| 50 | elif test $COUNT -gt 1; then | ||
| 51 | bbfatal More than one wheel found in ${PEP517_WHEEL_PATH}, this should not happen | ||
| 52 | fi | ||
| 53 | |||
| 54 | nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl | ||
| 55 | |||
| 56 | find ${D} -path *.dist-info/RECORD -delete | ||
| 57 | } | ||
| 58 | |||
| 59 | # A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native. | ||
| 60 | python_pep517_do_bootstrap_install () { | ||
| 61 | install -d ${D}${PYTHON_SITEPACKAGES_DIR} | ||
| 62 | unzip -d ${D}${PYTHON_SITEPACKAGES_DIR} ${PEP517_WHEEL_PATH}/*.whl | ||
| 63 | } | ||
| 64 | |||
| 65 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
| 66 | |||
| 67 | # Tell externalsrc this changing means it needs to reconfigure | ||
| 68 | CONFIGURE_FILES += "pyproject.toml" | ||
diff --git a/meta/classes-recipe/python_poetry_core.bbclass b/meta/classes-recipe/python_poetry_core.bbclass deleted file mode 100644 index 35a2f137cb..0000000000 --- a/meta/classes-recipe/python_poetry_core.bbclass +++ /dev/null | |||
| @@ -1,9 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-poetry-core-native" | ||
diff --git a/meta/classes-recipe/python_pyo3.bbclass b/meta/classes-recipe/python_pyo3.bbclass deleted file mode 100644 index 7f5a00f584..0000000000 --- a/meta/classes-recipe/python_pyo3.bbclass +++ /dev/null | |||
| @@ -1,36 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This class helps make sure that Python extensions built with PyO3 | ||
| 9 | # and setuptools_rust properly set up the environment for cross compilation | ||
| 10 | # | ||
| 11 | |||
| 12 | inherit cargo python3-dir siteinfo | ||
| 13 | |||
| 14 | export PYO3_CROSS = "1" | ||
| 15 | export PYO3_CROSS_PYTHON_VERSION = "${PYTHON_BASEVERSION}" | ||
| 16 | export PYO3_CROSS_LIB_DIR = "${STAGING_LIBDIR}" | ||
| 17 | export CARGO_BUILD_TARGET = "${RUST_HOST_SYS}" | ||
| 18 | export RUSTFLAGS | ||
| 19 | export PYO3_PYTHON = "${PYTHON}" | ||
| 20 | export PYO3_CONFIG_FILE = "${WORKDIR}/pyo3.config" | ||
| 21 | |||
| 22 | python_pyo3_do_configure () { | ||
| 23 | cat > ${WORKDIR}/pyo3.config << EOF | ||
| 24 | implementation=CPython | ||
| 25 | version=${PYTHON_BASEVERSION} | ||
| 26 | shared=true | ||
| 27 | abi3=false | ||
| 28 | lib_name=${PYTHON_DIR} | ||
| 29 | lib_dir=${STAGING_LIBDIR} | ||
| 30 | pointer_width=${SITEINFO_BITS} | ||
| 31 | build_flags=WITH_THREAD | ||
| 32 | suppress_build_script_link_lines=false | ||
| 33 | EOF | ||
| 34 | } | ||
| 35 | |||
| 36 | EXPORT_FUNCTIONS do_configure | ||
diff --git a/meta/classes-recipe/python_setuptools3_rust.bbclass b/meta/classes-recipe/python_setuptools3_rust.bbclass deleted file mode 100644 index d3d7590cbe..0000000000 --- a/meta/classes-recipe/python_setuptools3_rust.bbclass +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pyo3 python_setuptools_build_meta | ||
| 8 | |||
| 9 | DEPENDS += "python3-setuptools-rust-native" | ||
| 10 | |||
| 11 | python_setuptools3_rust_do_configure() { | ||
| 12 | python_pyo3_do_configure | ||
| 13 | cargo_common_do_configure | ||
| 14 | python_pep517_do_configure | ||
| 15 | } | ||
| 16 | |||
| 17 | EXPORT_FUNCTIONS do_configure | ||
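A hedged sketch of recipe-side usage for a setuptools-rust extension (in oe-core, python3-cryptography is a typical consumer; in practice the Rust dependencies arrive via crate:// entries in SRC_URI):

    inherit pypi python_setuptools3_rust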
diff --git a/meta/classes-recipe/python_setuptools_build_meta.bbclass b/meta/classes-recipe/python_setuptools_build_meta.bbclass deleted file mode 100644 index c7b12e5309..0000000000 --- a/meta/classes-recipe/python_setuptools_build_meta.bbclass +++ /dev/null | |||
| @@ -1,9 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-setuptools-native python3-wheel-native" | ||
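A minimal sketch of a recipe built with this backend (recipe name is hypothetical): per the python_pep517 flow it inherits, the wheel is produced in do_compile by python3-build-native and installed in do_install by python3-installer-native:

    # python3-example_1.0.bb (hypothetical)
    inherit pypi python_setuptools_build_meta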
diff --git a/meta/classes-recipe/qemu.bbclass b/meta/classes-recipe/qemu.bbclass deleted file mode 100644 index f83faf8049..0000000000 --- a/meta/classes-recipe/qemu.bbclass +++ /dev/null | |||
| @@ -1,28 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # This class contains functions for recipes that need QEMU or test for its | ||
| 9 | # existence. | ||
| 10 | # | ||
| 11 | |||
| 12 | def qemu_target_binary(data): | ||
| 13 | return oe.qemu.qemu_target_binary(data) | ||
| 14 | |||
| 15 | def qemu_wrapper_cmdline(data, rootfs_path, library_paths): | ||
| 16 | return oe.qemu.qemu_wrapper_cmdline(data, rootfs_path, library_paths) | ||
| 17 | |||
| 18 | def qemu_run_binary(data, rootfs_path, binary): | ||
| 19 | return oe.qemu.qemu_run_binary(data, rootfs_path, binary) | ||
| 20 | |||
| 21 | # QEMU_EXTRAOPTIONS is not meant to be used directly; the suffixes are | ||
| 22 | # PACKAGE_ARCH values, *NOT* overrides. | ||
| 23 | # In some cases (e.g. ppc) simply being arch specific (apparently) isn't good | ||
| 24 | # enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do | ||
| 25 | # this dance). For others (e.g. arm) a -cpu option is not necessary, since the | ||
| 26 | # qemu-arm default CPU supports all required architecture levels. | ||
| 27 | QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS:tune-%s" % d.getVar('TUNE_PKGARCH')) or ""}" | ||
| 28 | QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS:tune-${TUNE_PKGARCH}" | ||
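To make the PACKAGE_ARCH suffixing concrete, a tune file might provide (a sketch modeled on the ppc tunes; the exact value is illustrative):

    QEMU_EXTRAOPTIONS:tune-ppc7400 = " -cpu 7400"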
diff --git a/meta/classes-recipe/qemuboot.bbclass b/meta/classes-recipe/qemuboot.bbclass deleted file mode 100644 index 69cd12ec54..0000000000 --- a/meta/classes-recipe/qemuboot.bbclass +++ /dev/null | |||
| @@ -1,190 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Help runqemu boot the target board. "QB" means Qemu Boot. The following | ||
| 8 | # vars can be set in conf files, such as <bsp.conf>, so that the image | ||
| 9 | # can be booted by runqemu: | ||
| 10 | # | ||
| 11 | # QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386" | ||
| 12 | # | ||
| 13 | # QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse" | ||
| 14 | # | ||
| 15 | # QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage" | ||
| 16 | # e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1. | ||
| 17 | # | ||
| 18 | # QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4" | ||
| 19 | # | ||
| 20 | # QB_MEM: memory, e.g., "-m 512" | ||
| 21 | # | ||
| 22 | # QB_MACHINE: qemu machine, e.g., "-machine virt" | ||
| 23 | # | ||
| 24 | # QB_CPU: qemu cpu, e.g., "-cpu qemu32" | ||
| 25 | # | ||
| 26 | # QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64'; | ||
| 27 | # set it when KVM is supported. | ||
| 28 | # | ||
| 29 | # QB_SMP: number of CPU cores inside the qemu guest, each mapped to a thread on the host, | ||
| 30 | # e.g. "-smp 8". | ||
| 31 | # | ||
| 32 | # QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append | ||
| 33 | # option, e.g., "console=ttyS0 console=tty" | ||
| 34 | # | ||
| 35 | # QB_DTB: qemu dtb name | ||
| 36 | # | ||
| 37 | # QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio is supported | ||
| 38 | # | ||
| 39 | # QB_AUDIO_OPT: qemu audio option, e.g., "-device AC97", used | ||
| 40 | # when QB_AUDIO_DRV is set. | ||
| 41 | # | ||
| 42 | # QB_RNG: pass-through for the host random number generator; it can speed up boot | ||
| 43 | # in system mode when the system is experiencing entropy starvation | ||
| 44 | # | ||
| 45 | # QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda | ||
| 46 | # By default "/dev/vda rw" gets passed to the kernel. | ||
| 47 | # To mount the rootfs read-only QB_KERNEL_ROOT can be set to e.g. "/dev/vda ro". | ||
| 48 | # | ||
| 49 | # QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@", | ||
| 50 | # it needs work with QB_TAP_OPT and QB_SLIRP_OPT. | ||
| 51 | # Note, runqemu will replace @MAC@ with a predefined mac, you can set | ||
| 52 | # a custom one, but that may cause conflicts when multiple qemus are | ||
| 53 | # running on the same host. | ||
| 54 | # Note: If more than one interface of type -device virtio-net-device gets added, | ||
| 55 | # QB_NETWORK_DEVICE:prepend might be used, since Qemu enumerates the eth* | ||
| 56 | # devices in reverse order to -device arguments. | ||
| 57 | # | ||
| 58 | # QB_TAP_OPT: network option for 'tap' mode, e.g., | ||
| 59 | # "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no" | ||
| 60 | # Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ... | ||
| 61 | # | ||
| 62 | # QB_SLIRP_OPT: network option for SLIRP mode, e.g., -netdev user,id=net0" | ||
| 63 | # | ||
| 64 | # QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the | ||
| 65 | # ip= kernel command line argument needs to be changed accordingly. Details are documented | ||
| 66 | # in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt | ||
| 67 | # Example to configure only the first interface: "ip=eth0:dhcp" | ||
| 68 | # QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires | ||
| 69 | # static IP configuration, the @CLIENT@ and @GATEWAY@ placeholders are replaced by the IP and the gateway | ||
| 70 | # address of the qemu guest by runqemu. | ||
| 71 | # Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0" | ||
| 72 | # | ||
| 73 | # QB_ROOTFS_OPT: used as rootfs, e.g., | ||
| 74 | # "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0" | ||
| 75 | # Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4. | ||
| 76 | # | ||
| 77 | # QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio" | ||
| 78 | # | ||
| 79 | # QB_TCPSERIAL_OPT: tcp serial port option, e.g., | ||
| 80 | # " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon" | ||
| 81 | # Note, runqemu will replace "@PORT@" with the port number which is used. | ||
| 82 | # | ||
| 83 | # QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case there is none specified by QB_ROOTFS_OPT. | ||
| 84 | # Can be used to automatically determine the image from the other variables | ||
| 85 | # but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs | ||
| 86 | # without the need to specify a dedicated qemu configuration | ||
| 87 | # | ||
| 88 | # QB_GRAPHICS: QEMU video card type (e.g. "-vga std") | ||
| 89 | # QB_NFSROOTFS_EXTRA_OPT: extra options to be appended to the nfs rootfs options in kernel boot arg, e.g., | ||
| 90 | # "wsize=4096,rsize=4096" | ||
| 91 | # | ||
| 92 | # Usage: | ||
| 93 | # IMAGE_CLASSES += "qemuboot" | ||
| 94 | # See "runqemu help" for more info | ||
| 95 | |||
| 96 | QB_MEM ?= "-m 256" | ||
| 97 | QB_SMP ?= "" | ||
| 98 | QB_SERIAL_OPT ?= "-serial mon:stdio -serial null" | ||
| 99 | QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}" | ||
| 100 | QB_DEFAULT_FSTYPE ?= "ext4.zst" | ||
| 101 | QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0" | ||
| 102 | QB_OPT_APPEND ?= "" | ||
| 103 | QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@" | ||
| 104 | |||
| 105 | # qemurunner needs ip information first, so append QB_NO_PNI | ||
| 106 | # | ||
| 107 | QB_NO_PNI ?= "${@bb.utils.contains('DISTRO_FEATURES', 'pni-names', '', 'net.ifnames=0', d)}" | ||
| 108 | QB_CMDLINE_IP_SLIRP ?= "ip=dhcp" | ||
| 109 | QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8 ${QB_NO_PNI}" | ||
| 110 | |||
| 111 | QB_ROOTFS_EXTRA_OPT ?= "" | ||
| 112 | QB_GRAPHICS ?= "" | ||
| 113 | QB_NFSROOTFS_EXTRA_OPT ?= "" | ||
| 114 | |||
| 115 | # With 6.5+ (specifically, if DMA_BOUNCE_UNALIGNED_KMALLOC is set) the SW IO TLB | ||
| 116 | # is used, and it defaults to 64MB. This is too much when there's only 256MB of | ||
| 117 | # RAM, so request 0 slabs and let the kernel round up to the appropriate minimum | ||
| 118 | # (1MB, typically). In virtual hardware there's very little need for these bounce | ||
| 119 | # buffers, so the 64MB would be mostly wasted. | ||
| 120 | QB_KERNEL_CMDLINE_APPEND:append = " swiotlb=0" | ||
| 121 | |||
| 122 | # This should be kept aligned with ROOT_VM | ||
| 123 | QB_DRIVE_TYPE ?= "/dev/sd" | ||
| 124 | |||
| 125 | inherit image-artifact-names | ||
| 126 | |||
| 127 | # Create qemuboot.conf | ||
| 128 | addtask do_write_qemuboot_conf after do_rootfs before do_image | ||
| 129 | |||
| 130 | def qemuboot_vars(d): | ||
| 131 | build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE', | ||
| 132 | 'KERNEL_IMAGETYPE', 'KERNEL_IMAGE_NAME', | ||
| 133 | 'KERNEL_IMAGE_BIN_EXT', 'IMAGE_NAME', 'IMAGE_LINK_NAME', | ||
| 134 | 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE', | ||
| 135 | 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER'] | ||
| 136 | return build_vars + [k for k in d.keys() if k.startswith('QB_')] | ||
| 137 | |||
| 138 | do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}" | ||
| 139 | do_write_qemuboot_conf[vardepsexclude] += "TOPDIR" | ||
| 140 | python do_write_qemuboot_conf() { | ||
| 141 | import configparser | ||
| 142 | |||
| 143 | qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME')) | ||
| 144 | if d.getVar('IMAGE_LINK_NAME'): | ||
| 145 | qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME')) | ||
| 146 | else: | ||
| 147 | qemuboot_link = "" | ||
| 148 | finalpath = d.getVar("DEPLOY_DIR_IMAGE") | ||
| 149 | topdir = d.getVar('TOPDIR') | ||
| 150 | cf = configparser.ConfigParser() | ||
| 151 | cf.add_section('config_bsp') | ||
| 152 | for k in sorted(qemuboot_vars(d)): | ||
| 153 | if ":" in k: | ||
| 154 | continue | ||
| 155 | # qemu-helper-native sysroot is not removed by rm_work and | ||
| 156 | # contains all tools required by runqemu | ||
| 157 | if k == 'STAGING_BINDIR_NATIVE': | ||
| 158 | val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'), | ||
| 159 | 'qemu-helper-native/1.0/recipe-sysroot-native/usr/bin/') | ||
| 160 | else: | ||
| 161 | val = d.getVar(k) | ||
| 162 | if val is None: | ||
| 163 | continue | ||
| 164 | # we only want to write out relative paths so that we can relocate images | ||
| 165 | # and still run them | ||
| 166 | if val.startswith(topdir): | ||
| 167 | val = os.path.relpath(val, finalpath) | ||
| 168 | cf.set('config_bsp', k, '%s' % val) | ||
| 169 | |||
| 170 | # QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink | ||
| 171 | # to the kernel file, which hinders relocatability of the qb conf. | ||
| 172 | # Read the link and replace it with the full filename of the target. | ||
| 173 | kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL')) | ||
| 174 | kernel = os.path.realpath(kernel_link) | ||
| 175 | # we only want to write out relative paths so that we can relocate images | ||
| 176 | # and still run them | ||
| 177 | kernel = os.path.relpath(kernel, finalpath) | ||
| 178 | cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel) | ||
| 179 | |||
| 180 | bb.utils.mkdirhier(os.path.dirname(qemuboot)) | ||
| 181 | with open(qemuboot, 'w') as f: | ||
| 182 | cf.write(f) | ||
| 183 | |||
| 184 | if qemuboot_link and qemuboot_link != qemuboot: | ||
| 185 | if os.path.lexists(qemuboot_link): | ||
| 186 | os.remove(qemuboot_link) | ||
| 187 | os.symlink(os.path.basename(qemuboot), qemuboot_link) | ||
| 188 | } | ||
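The result is a flat INI file consumed by runqemu; a trimmed, illustrative example (ConfigParser lower-cases the keys, and paths under TOPDIR are rewritten relative to DEPLOY_DIR_IMAGE):

    [config_bsp]
    deploy_dir_image = .
    qb_default_fstype = ext4
    qb_mem = -m 256
    qb_system_name = qemu-system-x86_64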
| 189 | |||
| 190 | EXTRA_IMAGEDEPENDS += "qemu-system-native qemu-helper-native:do_addto_recipe_sysroot" | ||
diff --git a/meta/classes-recipe/rootfs-postcommands.bbclass b/meta/classes-recipe/rootfs-postcommands.bbclass deleted file mode 100644 index 8b5822a0b5..0000000000 --- a/meta/classes-recipe/rootfs-postcommands.bbclass +++ /dev/null | |||
| @@ -1,566 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Zap the root password if empty-root-password feature is not enabled | ||
| 8 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "empty-root-password", "", "zap_empty_root_password ",d)}' | ||
| 9 | |||
| 10 | # Allow dropbear/openssh to accept logins from accounts with an empty password string if allow-empty-password is enabled | ||
| 11 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "allow-empty-password", "ssh_allow_empty_password ", "",d)}' | ||
| 12 | |||
| 13 | # Allow dropbear/openssh to accept root logins if allow-root-login is enabled | ||
| 14 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "allow-root-login", "ssh_allow_root_login ", "",d)}' | ||
| 15 | |||
| 16 | # Autologin the root user on the serial console, if empty-root-password and serial-autologin-root are active | ||
| 17 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", [ 'empty-root-password', 'serial-autologin-root' ], "serial_autologin_root ", "",d)}' | ||
| 18 | |||
| 19 | # Enable postinst logging if post-install-logging is enabled | ||
| 20 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "post-install-logging", "postinst_enable_logging ", "",d)}' | ||
| 21 | |||
| 22 | # Create /etc/timestamp during image construction to give a reasonably sane default time setting | ||
| 23 | ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp " | ||
| 24 | |||
| 25 | # Tweak files in /etc if read-only-rootfs is enabled | ||
| 26 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook ", "",d)}' | ||
| 27 | |||
| 28 | # We also need to do the same for the kernel boot parameters, | ||
| 29 | # otherwise kernel or initramfs end up mounting the rootfs read/write | ||
| 30 | # (the default) if supported by the underlying storage. | ||
| 31 | # | ||
| 32 | # We do this with :append because the default value might get set later with ?= | ||
| 33 | # and we don't want to disable such a default that by setting a value here. | ||
| 34 | APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}' | ||
| 35 | |||
| 36 | # Generate a test data file with datastore variables expanded, in JSON format | ||
| 37 | ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data " | ||
| 38 | |||
| 39 | # Write manifest | ||
| 40 | IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.manifest" | ||
| 41 | ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest" | ||
| 42 | # Set default postinst log file | ||
| 43 | POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log" | ||
| 44 | # Set default target for systemd images | ||
| 45 | SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}' | ||
| 46 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target systemd_sysusers_check systemd_handle_machine_id", "", d)}' | ||
| 47 | |||
| 48 | ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile' | ||
| 49 | |||
| 50 | ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check overlayfs_postprocess", "", d)}' | ||
| 51 | |||
| 52 | inherit image-artifact-names | ||
| 53 | |||
| 54 | # Sort the user and group entries in /etc by ID in order to make the content | ||
| 55 | # deterministic. Package installs are not deterministic, causing the ordering | ||
| 56 | # of entries to change between builds. If this behaviour isn't desired, | ||
| 57 | # the command can be overridden. | ||
| 58 | SORT_PASSWD_POSTPROCESS_COMMAND ??= "tidy_shadowutils_files" | ||
| 59 | ROOTFS_POSTPROCESS_COMMAND += '${SORT_PASSWD_POSTPROCESS_COMMAND}' | ||
| 60 | |||
| 61 | # | ||
| 62 | # Note that useradd-staticids.bbclass has to be used to ensure that | ||
| 63 | # the numeric IDs of dynamically created entries remain stable. | ||
| 64 | # | ||
| 65 | ROOTFS_POSTPROCESS_COMMAND += 'rootfs_reproducible' | ||
| 66 | |||
| 67 | # Resolve the ID as described in the sysusers.d(5) manual: ID can be a numeric | ||
| 68 | # uid, a pair uid:gid or uid:groupname, or '-' to leave it automatic, | ||
| 69 | # or it can be a path. In the latter case, the uid/gid matches the | ||
| 70 | # user/group owner of that file. | ||
| 71 | def resolve_sysusers_id(d, sid): | ||
| 72 | # If the id is a path, the uid/gid matches the target file's uid/gid in the | ||
| 73 | # rootfs. | ||
| 74 | if '/' in sid: | ||
| 75 | try: | ||
| 76 | osstat = os.stat(os.path.join(d.getVar('IMAGE_ROOTFS'), sid)) | ||
| 77 | except FileNotFoundError: | ||
| 78 | bb.error('sysusers.d: file %s is required but it does not exist in the rootfs' % sid) | ||
| 79 | return ('-', '-') | ||
| 80 | return (osstat.st_uid, osstat.st_gid) | ||
| 81 | # Else it is a uid:gid or uid:groupname syntax | ||
| 82 | if ':' in sid: | ||
| 83 | return sid.split(':') | ||
| 84 | else: | ||
| 85 | return (sid, '-') | ||
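To illustrate the three accepted ID forms (the entries below are hypothetical sysusers.d lines): '101:netdev' resolves to that uid and group, a path takes the uid/gid from the owner of that file in the rootfs, and '-' leaves the allocation automatic:

    u exampleuser 101:netdev "Example daemon" /var/lib/example -
    u statefuluser /var/lib/state - - -
    u autouser - - - -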
| 86 | |||
| 87 | # Check a user exists in the rootfs password file and return its properties | ||
| 88 | def check_user_exists(d, uname=None, uid=None): | ||
| 89 | with open(os.path.join(d.getVar('IMAGE_ROOTFS'), 'etc/passwd'), 'r') as pwfile: | ||
| 90 | for line in pwfile: | ||
| 91 | (name, _, u_id, gid, comment, homedir, ushell) = line.strip().split(':') | ||
| 92 | if uname == name or uid == u_id: | ||
| 93 | return (name, u_id, gid, comment or '-', homedir or '/', ushell or '-') | ||
| 94 | return None | ||
| 95 | |||
| 96 | # Check a group exists in the rootfs group file and return its properties | ||
| 97 | def check_group_exists(d, gname=None, gid=None): | ||
| 98 | with open(os.path.join(d.getVar('IMAGE_ROOTFS'), 'etc/group'), 'r') as gfile: | ||
| 99 | for line in gfile: | ||
| 100 | (name, _, g_id, _) = line.strip().split(':') | ||
| 101 | if name == gname or g_id == gid: | ||
| 102 | return (name, g_id) | ||
| 103 | return None | ||
| 104 | |||
| 105 | def compare_users(user, e_user): | ||
| 106 | # user and e_user must not have None values. Unset values must be '-'. | ||
| 107 | (name, uid, gid, comment, homedir, ushell) = user | ||
| 108 | (e_name, e_uid, e_gid, e_comment, e_homedir, e_ushell) = e_user | ||
| 109 | # Ignore 'uid' or 'gid' if they are not set | ||
| 110 | # Ignore 'homedir' or 'ushell' if either side is not set | ||
| 111 | return name == e_name \ | ||
| 112 | and (uid == '-' or uid == e_uid) \ | ||
| 113 | and (gid == '-' or gid == e_gid) \ | ||
| 114 | and (homedir == '-' or e_homedir == '-' or homedir == e_homedir) \ | ||
| 115 | and (ushell == '-' or e_ushell == '-' or ushell == e_ushell) | ||
| 116 | |||
| 117 | # Open sysusers.d configuration files and parse each line to check that the users | ||
| 118 | # and groups are already defined in /etc/passwd and /etc/group with similar | ||
| 119 | # properties. Refer to the sysusers.d(5) manual for the syntax. | ||
| 120 | python systemd_sysusers_check() { | ||
| 121 | import glob | ||
| 122 | import re | ||
| 123 | |||
| 124 | pattern_comment = r'(-|\"[^:\"]+\")' | ||
| 125 | pattern_word = r'[^\s]+' | ||
| 126 | pattern_line = r'(' + pattern_word + r')\s+(' + pattern_word + r')\s+(' + pattern_word + r')(\s+' \ | ||
| 127 | + pattern_comment + r')?' + r'(\s+(' + pattern_word + r'))?' + r'(\s+(' + pattern_word + r'))?' | ||
| 128 | |||
| 129 | for conffile in glob.glob(os.path.join(d.getVar('IMAGE_ROOTFS'), 'usr/lib/sysusers.d/*.conf')): | ||
| 130 | with open(conffile, 'r') as f: | ||
| 131 | for line in f: | ||
| 132 | line = line.strip() | ||
| 133 | if not len(line) or line[0] == '#': continue | ||
| 134 | ret = re.fullmatch(pattern_line, line) | ||
| 135 | if not ret: continue | ||
| 136 | (stype, sname, sid, _, scomment, _, shomedir, _, sshell) = ret.groups() | ||
| 137 | if stype == 'u': | ||
| 138 | if sid: | ||
| 139 | (suid, sgid) = resolve_sysusers_id(d, sid) | ||
| 140 | if sgid.isalpha(): | ||
| 141 | sgid = check_group_exists(d, gname=sgid) | ||
| 142 | elif sgid.isdigit(): | ||
| 143 | check_group_exists(d, gid=sgid) | ||
| 144 | else: | ||
| 145 | sgid = '-' | ||
| 146 | else: | ||
| 147 | suid = '-' | ||
| 148 | sgid = '-' | ||
| 149 | scomment = scomment.replace('"', '') if scomment else '-' | ||
| 150 | shomedir = shomedir or '-' | ||
| 151 | sshell = sshell or '-' | ||
| 152 | e_user = check_user_exists(d, uname=sname) | ||
| 153 | if not e_user: | ||
| 154 | bb.warn('User %s has never been defined' % sname) | ||
| 155 | elif not compare_users((sname, suid, sgid, scomment, shomedir, sshell), e_user): | ||
| 156 | bb.warn('User %s has been defined as (%s) but sysusers.d expects it as (%s)' | ||
| 157 | % (sname, ', '.join(e_user), | ||
| 158 | ', '.join((sname, suid, sgid, scomment, shomedir, sshell)))) | ||
| 159 | elif stype == 'g': | ||
| 160 | gid = sid or '-' | ||
| 161 | if '/' in gid: | ||
| 162 | (_, gid) = resolve_sysusers_id(d, sid) | ||
| 163 | e_group = check_group_exists(d, gname=sname) | ||
| 164 | if not e_group: | ||
| 165 | bb.warn('Group %s has never been defined' % sname) | ||
| 166 | elif gid != '-': | ||
| 167 | (_, e_gid) = e_group | ||
| 168 | if gid != e_gid: | ||
| 169 | bb.warn('Group %s has been defined with id (%s) but sysusers.d expects gid (%s)' | ||
| 170 | % (sname, e_gid, gid)) | ||
| 171 | elif stype == 'm': | ||
| 172 | check_user_exists(d, sname) | ||
| 173 | check_group_exists(d, sid) | ||
| 174 | } | ||
| 175 | |||
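As a standalone illustration of what that regular expression captures, here is the same pattern applied to a hypothetical sysusers.d user entry:

    import re

    pattern_comment = r'(-|\"[^:\"]+\")'
    pattern_word = r'[^\s]+'
    pattern_line = (r'(' + pattern_word + r')\s+(' + pattern_word + r')\s+(' + pattern_word + r')'
                    + r'(\s+' + pattern_comment + r')?(\s+(' + pattern_word + r'))?(\s+(' + pattern_word + r'))?')

    line = 'u messagebus 101 "D-Bus daemon" /var/lib/dbus /bin/false'
    stype, sname, sid, _, scomment, _, shomedir, _, sshell = re.fullmatch(pattern_line, line).groups()
    print(stype, sname, sid, scomment, shomedir, sshell)
    # -> u messagebus 101 "D-Bus daemon" /var/lib/dbus /bin/false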
| 176 | systemd_handle_machine_id() { | ||
| 177 | if ${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "true", "false", d)}; then | ||
| 178 | # Create machine-id | ||
| 179 | # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable | ||
| 180 | touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id | ||
| 181 | fi | ||
| 182 | # In order to be backward compatible with the previous OE-core specific (re)implementation of systemctl | ||
| 183 | # we need to touch machine-id when handling presets and when the rootfs is NOT stateless | ||
| 184 | if ${@ 'true' if not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else 'false'}; then | ||
| 185 | touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id | ||
| 186 | if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then | ||
| 187 | systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all | ||
| 188 | systemctl --root="${IMAGE_ROOTFS}" --global --preset-mode=enable-only preset-all | ||
| 189 | fi | ||
| 190 | fi | ||
| 191 | } | ||
| 192 | |||
| 193 | # | ||
| 194 | # A hook function to support read-only-rootfs IMAGE_FEATURES | ||
| 195 | # | ||
| 196 | read_only_rootfs_hook () { | ||
| 197 | # Tweak the mount option and fs_passno for rootfs in fstab | ||
| 198 | if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then | ||
| 199 | sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab | ||
| 200 | fi | ||
| 201 | |||
| 202 | # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab | ||
| 203 | if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then | ||
| 204 | sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab | ||
| 205 | fi | ||
| 206 | |||
| 207 | # If we're using openssh and the /etc/ssh directory has no pre-generated keys, | ||
| 208 | # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly | ||
| 209 | # and the keys under /var/run/ssh. | ||
| 210 | # If overlayfs-etc is used this is not done as /etc is treated as writable | ||
| 211 | # If stateless-rootfs is enabled this is always done as we don't want to save keys then | ||
| 212 | if ${@ 'true' if not bb.utils.contains('IMAGE_FEATURES', 'overlayfs-etc', True, False, d) or bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else 'false'}; then | ||
| 213 | if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then | ||
| 214 | if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then | ||
| 215 | echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh | ||
| 216 | echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh | ||
| 217 | else | ||
| 218 | echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh | ||
| 219 | echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh | ||
| 220 | fi | ||
| 221 | fi | ||
| 222 | |||
| 223 | # Also tweak the key location for dropbear in the same way. | ||
| 224 | if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then | ||
| 225 | if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then | ||
| 226 | if ! grep -q "^DROPBEAR_RSAKEY_DIR=" ${IMAGE_ROOTFS}/etc/default/dropbear ; then | ||
| 227 | echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear | ||
| 228 | fi | ||
| 229 | fi | ||
| 230 | fi | ||
| 231 | fi | ||
| 232 | |||
| 233 | if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then | ||
| 234 | # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes | ||
| 235 | if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then | ||
| 236 | sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS | ||
| 237 | fi | ||
| 238 | # Run populate-volatile.sh at rootfs time to set up basic files | ||
| 239 | # and directories to support read-only rootfs. | ||
| 240 | if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then | ||
| 241 | ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh | ||
| 242 | fi | ||
| 243 | fi | ||
| 244 | } | ||
| 245 | |||
| 246 | # | ||
| 247 | # This function disallows empty root passwords | ||
| 248 | # | ||
| 249 | zap_empty_root_password () { | ||
| 250 | if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then | ||
| 251 | sed --follow-symlinks -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow | ||
| 252 | fi | ||
| 253 | if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then | ||
| 254 | sed --follow-symlinks -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd | ||
| 255 | fi | ||
| 256 | } | ||
| 257 | |||
| 258 | # | ||
| 259 | # allow dropbear/openssh to accept logins from accounts with an empty password string | ||
| 260 | # | ||
| 261 | ssh_allow_empty_password () { | ||
| 262 | for config in sshd_config sshd_config_readonly; do | ||
| 263 | if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then | ||
| 264 | sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config | ||
| 265 | fi | ||
| 266 | done | ||
| 267 | |||
| 268 | if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then | ||
| 269 | if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then | ||
| 270 | if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then | ||
| 271 | sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear | ||
| 272 | fi | ||
| 273 | else | ||
| 274 | printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear | ||
| 275 | fi | ||
| 276 | fi | ||
| 277 | |||
| 278 | if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then | ||
| 279 | for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print` | ||
| 280 | do | ||
| 281 | sed -i 's/nullok_secure/nullok/' $f | ||
| 282 | done | ||
| 283 | fi | ||
| 284 | } | ||
| 285 | |||
| 286 | # | ||
| 287 | # allow dropbear/openssh to accept root logins | ||
| 288 | # | ||
| 289 | ssh_allow_root_login () { | ||
| 290 | for config in sshd_config sshd_config_readonly; do | ||
| 291 | if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then | ||
| 292 | sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config | ||
| 293 | fi | ||
| 294 | done | ||
| 295 | |||
| 296 | if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then | ||
| 297 | if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then | ||
| 298 | sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear | ||
| 299 | sed -i '/^# Disallow root/d' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear | ||
| 300 | fi | ||
| 301 | fi | ||
| 302 | } | ||
| 303 | |||
| 304 | # | ||
| 305 | # Autologin the 'root' user on the serial terminal, | ||
| 306 | # if 'empty-root-password' and 'serial-autologin-root' are enabled | ||
| 307 | # | ||
| 308 | serial_autologin_root () { | ||
| 309 | if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then | ||
| 310 | # add autologin option to util-linux getty only | ||
| 311 | sed -i 's/options="/&--autologin root /' \ | ||
| 312 | "${IMAGE_ROOTFS}${base_bindir}/start_getty" | ||
| 313 | elif ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then | ||
| 314 | if [ -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/serial-getty@.service ]; then | ||
| 315 | sed -i '/^\s*ExecStart\b/ s/getty /&--autologin root /' \ | ||
| 316 | "${IMAGE_ROOTFS}${systemd_system_unitdir}/serial-getty@.service" | ||
| 317 | fi | ||
| 318 | fi | ||
| 319 | } | ||
| 320 | |||
| 321 | python tidy_shadowutils_files () { | ||
| 322 | import oe.rootfspostcommands | ||
| 323 | oe.rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}')) | ||
| 324 | } | ||
| 325 | |||
| 326 | python sort_passwd () { | ||
| 327 | """ | ||
| 328 | Deprecated in favour of tidy_shadowutils_files. | ||
| 329 | """ | ||
| 330 | import oe.rootfspostcommands | ||
| 331 | bb.warn('[sort_passwd] You are using a deprecated function for ' | ||
| 332 | 'SORT_PASSWD_POSTPROCESS_COMMAND. The default one is now called ' | ||
| 333 | '"tidy_shadowutils_files".') | ||
| 334 | oe.rootfspostcommands.tidy_shadowutils_files(d.expand('${IMAGE_ROOTFS}${sysconfdir}')) | ||
| 335 | } | ||
| 336 | |||
| 337 | # | ||
| 338 | # Enable postinst logging | ||
| 339 | # | ||
| 340 | postinst_enable_logging () { | ||
| 341 | mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default | ||
| 342 | echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst | ||
| 343 | echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst | ||
| 344 | } | ||
| 345 | |||
| 346 | # | ||
| 347 | # Modify systemd default target | ||
| 348 | # | ||
| 349 | set_systemd_default_target () { | ||
| 350 | if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ]; then | ||
| 351 | ln -sf ${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target | ||
| 352 | fi | ||
| 353 | } | ||
| 354 | |||
| 355 | # If /var/volatile is not empty, we have seen problems where programs such as the | ||
| 356 | # journal make assumptions based on its contents. The journal would write to | ||
| 357 | # /var/volatile before it was mounted, and the mount would then hide the items | ||
| 358 | # previously written. | ||
| 359 | # | ||
| 360 | # This change is to attempt to fix those types of issues in a way that doesn't | ||
| 361 | # affect users that may not be using /var/volatile. | ||
| 362 | empty_var_volatile () { | ||
| 363 | if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then | ||
| 364 | match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null` | ||
| 365 | if [ -n "$match" ]; then | ||
| 366 | find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete | ||
| 367 | fi | ||
| 368 | fi | ||
| 369 | } | ||
| 370 | |||
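A rough Python equivalent of the awk test above, written against a hypothetical fstab path: it asks whether any non-comment fstab entry mounts something on /var/volatile.

    def var_volatile_in_fstab(fstab='/tmp/rootfs/etc/fstab'):
        with open(fstab) as f:
            for line in f:
                fields = line.split()
                # Mirror the awk: first field is not a comment, second field matches /var/volatile
                if len(fields) >= 2 and not fields[0].startswith('#') and '/var/volatile' in fields[1]:
                    return True
        return False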
| 371 | # Turn any symbolic /sbin/init link into a file | ||
| 372 | remove_init_link () { | ||
| 373 | if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then | ||
| 374 | LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init` | ||
| 375 | rm ${IMAGE_ROOTFS}/sbin/init | ||
| 376 | cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init | ||
| 377 | fi | ||
| 378 | } | ||
| 379 | |||
| 380 | python write_image_manifest () { | ||
| 381 | from oe.rootfs import image_list_installed_packages | ||
| 382 | from oe.utils import format_pkg_list | ||
| 383 | |||
| 384 | deploy_dir = d.getVar('IMGDEPLOYDIR') | ||
| 385 | link_name = d.getVar('IMAGE_LINK_NAME') | ||
| 386 | manifest_name = d.getVar('IMAGE_MANIFEST') | ||
| 387 | |||
| 388 | if not manifest_name: | ||
| 389 | return | ||
| 390 | |||
| 391 | pkgs = image_list_installed_packages(d) | ||
| 392 | with open(manifest_name, 'w+') as image_manifest: | ||
| 393 | image_manifest.write(format_pkg_list(pkgs, "ver")) | ||
| 394 | |||
| 395 | if os.path.exists(manifest_name) and link_name: | ||
| 396 | manifest_link = deploy_dir + "/" + link_name + ".manifest" | ||
| 397 | if manifest_link != manifest_name: | ||
| 398 | if os.path.lexists(manifest_link): | ||
| 399 | os.remove(manifest_link) | ||
| 400 | os.symlink(os.path.basename(manifest_name), manifest_link) | ||
| 401 | } | ||
| 402 | |||
| 403 | # Can be used to create /etc/timestamp during image construction to give a reasonably | ||
| 404 | # sane default time setting | ||
| 405 | rootfs_update_timestamp () { | ||
| 406 | if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then | ||
| 407 | # Convert UTC into %4Y%2m%2d%2H%2M%2S | ||
| 408 | sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S` | ||
| 409 | else | ||
| 410 | sformatted=`date -u +%4Y%2m%2d%2H%2M%2S` | ||
| 411 | fi | ||
| 412 | echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp | ||
| 413 | bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted" | ||
| 414 | } | ||
| 415 | |||
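The date invocation above maps directly onto Python's strftime; a minimal sketch with a hypothetical REPRODUCIBLE_TIMESTAMP_ROOTFS value:

    from datetime import datetime, timezone

    ts = 1672531200  # hypothetical epoch seconds
    print(datetime.fromtimestamp(ts, tz=timezone.utc).strftime('%Y%m%d%H%M%S'))
    # -> 20230101000000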
| 416 | # Prevent X from being started | ||
| 417 | rootfs_no_x_startup () { | ||
| 418 | if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then | ||
| 419 | chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm | ||
| 420 | fi | ||
| 421 | } | ||
| 422 | |||
| 423 | rootfs_trim_schemas () { | ||
| 424 | for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas | ||
| 425 | do | ||
| 426 | # Need this in case no files exist | ||
| 427 | if [ -e $schema ]; then | ||
| 428 | oe-trim-schemas $schema > $schema.new | ||
| 429 | mv $schema.new $schema | ||
| 430 | fi | ||
| 431 | done | ||
| 432 | } | ||
| 433 | |||
| 434 | rootfs_check_host_user_contaminated () { | ||
| 435 | contaminated="${S}/host-user-contaminated.txt" | ||
| 436 | HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)" | ||
| 437 | HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)" | ||
| 438 | |||
| 439 | find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \ | ||
| 440 | -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated" | ||
| 441 | |||
| 442 | sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do | ||
| 443 | bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line` | ||
| 444 | done | ||
| 445 | |||
| 446 | if [ -s "$contaminated" ]; then | ||
| 447 | bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd` | ||
| 448 | bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group` | ||
| 449 | fi | ||
| 450 | } | ||
| 451 | |||
| 452 | # Make any absolute links in a sysroot relative | ||
| 453 | rootfs_sysroot_relativelinks () { | ||
| 454 | sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT} | ||
| 455 | } | ||
| 456 | |||
| 457 | # Generate the test data json file | ||
| 458 | python write_image_test_data() { | ||
| 459 | from oe.data import export2json | ||
| 460 | |||
| 461 | deploy_dir = d.getVar('IMGDEPLOYDIR') | ||
| 462 | link_name = d.getVar('IMAGE_LINK_NAME') | ||
| 463 | testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME')) | ||
| 464 | |||
| 465 | searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/") | ||
| 466 | export2json(d, testdata_name, searchString=searchString, replaceString="") | ||
| 467 | |||
| 468 | if os.path.exists(testdata_name) and link_name: | ||
| 469 | testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name) | ||
| 470 | if testdata_link != testdata_name: | ||
| 471 | if os.path.lexists(testdata_link): | ||
| 472 | os.remove(testdata_link) | ||
| 473 | os.symlink(os.path.basename(testdata_name), testdata_link) | ||
| 474 | } | ||
| 475 | write_image_test_data[vardepsexclude] += "TOPDIR" | ||
| 476 | |||
| 477 | # Check for unsatisfied recommendations (RRECOMMENDS) | ||
| 478 | python rootfs_log_check_recommends() { | ||
| 479 | log_path = d.expand("${T}/log.do_rootfs") | ||
| 480 | with open(log_path, 'r') as log: | ||
| 481 | for line in log: | ||
| 482 | if 'log_check' in line: | ||
| 483 | continue | ||
| 484 | |||
| 485 | if 'unsatisfied recommendation for' in line: | ||
| 486 | bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line)) | ||
| 487 | } | ||
| 488 | |||
| 489 | # Perform any additional adjustments needed to make the rootfs binary reproducible | ||
| 490 | rootfs_reproducible () { | ||
| 491 | if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then | ||
| 492 | # Convert UTC into %4Y%2m%2d%2H%2M%2S | ||
| 493 | sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S` | ||
| 494 | echo $sformatted > ${IMAGE_ROOTFS}/etc/version | ||
| 495 | bbnote "rootfs_reproducible: set /etc/version to $sformatted" | ||
| 496 | |||
| 497 | if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then | ||
| 498 | find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \ | ||
| 499 | sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g' | ||
| 500 | fi | ||
| 501 | |||
| 502 | if [ -f ${IMAGE_ROOTFS}${localstatedir}/lib/opkg/status ]; then | ||
| 503 | sed -i 's/^Installed-Time: .*/Installed-Time: ${REPRODUCIBLE_TIMESTAMP_ROOTFS}/' ${IMAGE_ROOTFS}${localstatedir}/lib/opkg/status | ||
| 504 | fi | ||
| 505 | fi | ||
| 506 | } | ||
| 507 | |||
| 508 | # Perform a dumb check for unit existence, not its validity | ||
| 509 | python overlayfs_qa_check() { | ||
| 510 | from oe.overlayfs import mountUnitName | ||
| 511 | |||
| 512 | overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") or {} | ||
| 513 | imagepath = d.getVar("IMAGE_ROOTFS") | ||
| 514 | sysconfdir = d.getVar("sysconfdir") | ||
| 515 | searchpaths = [oe.path.join(imagepath, sysconfdir, "systemd", "system"), | ||
| 516 | oe.path.join(imagepath, d.getVar("systemd_system_unitdir"))] | ||
| 517 | fstabpath = oe.path.join(imagepath, sysconfdir, "fstab") | ||
| 518 | |||
| 519 | if not any(os.path.exists(path) for path in [*searchpaths, fstabpath]): | ||
| 520 | return | ||
| 521 | |||
| 522 | fstabDevices = [] | ||
| 523 | if os.path.isfile(fstabpath): | ||
| 524 | with open(fstabpath, 'r') as f: | ||
| 525 | for line in f: | ||
| 526 | if line[0] == '#': | ||
| 527 | continue | ||
| 528 | path = line.split(maxsplit=2) | ||
| 529 | if len(path) > 2: | ||
| 530 | fstabDevices.append(path[1]) | ||
| 531 | |||
| 532 | allUnitExist = True | ||
| 533 | for mountPoint in overlayMountPoints: | ||
| 534 | qaSkip = (d.getVarFlag("OVERLAYFS_QA_SKIP", mountPoint) or "").split() | ||
| 535 | if "mount-configured" in qaSkip: | ||
| 536 | continue | ||
| 537 | |||
| 538 | mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint) | ||
| 539 | if mountPath in fstabDevices: | ||
| 540 | continue | ||
| 541 | |||
| 542 | mountUnit = mountUnitName(mountPath) | ||
| 543 | if any(os.path.isfile(oe.path.join(dirpath, mountUnit)) | ||
| 544 | for dirpath in searchpaths): | ||
| 545 | continue | ||
| 546 | |||
| 547 | bb.warn(f'Mount path {mountPath} not found in fstab and unit ' | ||
| 548 | f'{mountUnit} not found in systemd unit directories.') | ||
| 549 | bb.warn(f'Skip this check by setting OVERLAYFS_QA_SKIP[{mountPoint}] = ' | ||
| 550 | '"mount-configured"') | ||
| 551 | allUnitExist = False | ||
| 552 | |||
| 553 | if not allUnitExist: | ||
| 554 | bb.fatal('Not all mount paths and units are installed in the image') | ||
| 555 | } | ||
| 556 | |||
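The check hinges on oe.overlayfs.mountUnitName, which derives the systemd mount unit name from the mount point. A sketch of the systemd naming rule it follows (an illustration, not the exact library code): strip the leading '/', escape '-' and '\', and turn the remaining '/' separators into '-'.

    def mount_unit_name(path):
        # systemd path escaping for unit names
        escape = {'/': '-', '-': '\\x2d', '\\': '\\x5c'}
        return ''.join(escape.get(c, c) for c in path.strip('/')) + '.mount'

    print(mount_unit_name('/mnt/overlay'))  # -> mnt-overlay.mount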
| 557 | python overlayfs_postprocess() { | ||
| 558 | import shutil | ||
| 559 | |||
| 560 | # install helper script | ||
| 561 | helperScriptName = "overlayfs-create-dirs.sh" | ||
| 562 | helperScriptSource = oe.path.join(d.getVar("COREBASE"), "meta/files", helperScriptName) | ||
| 563 | helperScriptDest = oe.path.join(d.getVar("IMAGE_ROOTFS"), "/usr/sbin/", helperScriptName) | ||
| 564 | shutil.copyfile(helperScriptSource, helperScriptDest) | ||
| 565 | os.chmod(helperScriptDest, 0o755) | ||
| 566 | } | ||
diff --git a/meta/classes-recipe/rootfs_deb.bbclass b/meta/classes-recipe/rootfs_deb.bbclass deleted file mode 100644 index c5c6426abb..0000000000 --- a/meta/classes-recipe/rootfs_deb.bbclass +++ /dev/null | |||
| @@ -1,41 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright 2006-2007 Openedhand Ltd. | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | ROOTFS_PKGMANAGE = "dpkg apt" | ||
| 8 | |||
| 9 | do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot" | ||
| 10 | do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot" | ||
| 11 | do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa" | ||
| 12 | do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS" | ||
| 13 | |||
| 14 | do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock" | ||
| 15 | do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock" | ||
| 16 | do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock" | ||
| 17 | |||
| 18 | python rootfs_deb_bad_recommendations() { | ||
| 19 | if d.getVar("BAD_RECOMMENDATIONS"): | ||
| 20 | bb.warn("Debian package install does not support BAD_RECOMMENDATIONS") | ||
| 21 | } | ||
| 22 | do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations" | ||
| 23 | |||
| 24 | DEB_POSTPROCESS_COMMANDS = "" | ||
| 25 | |||
| 26 | opkglibdir = "${localstatedir}/lib/opkg" | ||
| 27 | |||
| 28 | python () { | ||
| 29 | # Map TARGET_ARCH to Debian's ideas about architectures | ||
| 30 | darch = d.getVar('SDK_ARCH') | ||
| 31 | if darch in ["x86", "i486", "i586", "i686", "pentium"]: | ||
| 32 | d.setVar('DEB_SDK_ARCH', 'i386') | ||
| 33 | elif darch == "x86_64": | ||
| 34 | d.setVar('DEB_SDK_ARCH', 'amd64') | ||
| 35 | elif darch == "arm": | ||
| 36 | d.setVar('DEB_SDK_ARCH', 'armel') | ||
| 37 | elif darch == "aarch64": | ||
| 38 | d.setVar('DEB_SDK_ARCH', 'arm64') | ||
| 39 | else: | ||
| 40 | bb.fatal("Unhandled SDK_ARCH %s" % darch) | ||
| 41 | } | ||
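For illustration, the same SDK_ARCH-to-Debian mapping can be expressed as a lookup table (a sketch, not the class code):

    DEB_ARCH_MAP = {
        'x86': 'i386', 'i486': 'i386', 'i586': 'i386', 'i686': 'i386', 'pentium': 'i386',
        'x86_64': 'amd64',
        'arm': 'armel',
        'aarch64': 'arm64',
    }

    def deb_sdk_arch(darch):
        # Raises KeyError for the unhandled-architecture case the class treats as fatal
        return DEB_ARCH_MAP[darch]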
diff --git a/meta/classes-recipe/rootfs_ipk.bbclass b/meta/classes-recipe/rootfs_ipk.bbclass deleted file mode 100644 index 87fff53a58..0000000000 --- a/meta/classes-recipe/rootfs_ipk.bbclass +++ /dev/null | |||
| @@ -1,44 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # Creates a root filesystem out of IPKs | ||
| 9 | # | ||
| 10 | # This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs image etc. | ||
| 11 | # See image.bbclass for an example of its use. | ||
| 12 | # | ||
| 13 | |||
| 14 | EXTRAOPKGCONFIG ?= "" | ||
| 15 | ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}" | ||
| 16 | |||
| 17 | do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot" | ||
| 18 | do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot" | ||
| 19 | do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa" | ||
| 20 | do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS" | ||
| 21 | |||
| 22 | do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock" | ||
| 23 | do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock" | ||
| 24 | do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock" | ||
| 25 | |||
| 26 | OPKG_PREPROCESS_COMMANDS = "" | ||
| 27 | |||
| 28 | OPKG_POSTPROCESS_COMMANDS = "" | ||
| 29 | |||
| 30 | OPKGLIBDIR ??= "${localstatedir}/lib" | ||
| 31 | |||
| 32 | MULTILIBRE_ALLOW_REP += "${OPKGLIBDIR}/opkg /usr/lib/opkg" | ||
| 33 | |||
| 34 | python () { | ||
| 35 | |||
| 36 | if d.getVar('BUILD_IMAGES_FROM_FEEDS'): | ||
| 37 | flags = d.getVarFlag('do_rootfs', 'recrdeptask') | ||
| 38 | flags = flags.replace("do_package_write_ipk", "") | ||
| 39 | flags = flags.replace("do_deploy", "") | ||
| 40 | flags = flags.replace("do_populate_sysroot", "") | ||
| 41 | d.setVarFlag('do_rootfs', 'recrdeptask', flags) | ||
| 42 | d.setVar('OPKG_PREPROCESS_COMMANDS', "") | ||
| 43 | d.setVar('OPKG_POSTPROCESS_COMMANDS', '') | ||
| 44 | } | ||
diff --git a/meta/classes-recipe/rootfs_rpm.bbclass b/meta/classes-recipe/rootfs_rpm.bbclass deleted file mode 100644 index 55f1cc92ca..0000000000 --- a/meta/classes-recipe/rootfs_rpm.bbclass +++ /dev/null | |||
| @@ -1,43 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # | ||
| 8 | # Creates a root filesystem out of rpm packages | ||
| 9 | # | ||
| 10 | |||
| 11 | ROOTFS_PKGMANAGE = "rpm dnf" | ||
| 12 | |||
| 13 | # dnf is using our custom sysconfig module, and so will fail without these | ||
| 14 | export STAGING_INCDIR | ||
| 15 | export STAGING_LIBDIR | ||
| 16 | |||
| 17 | # Add 100Meg of extra space for dnf | ||
| 18 | IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}" | ||
| 19 | |||
| 20 | # Dnf is python based, so be sure python3-native is available to us. | ||
| 21 | EXTRANATIVEPATH += "python3-native" | ||
| 22 | |||
| 23 | RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \ | ||
| 24 | dnf-native:do_populate_sysroot \ | ||
| 25 | createrepo-c-native:do_populate_sysroot" | ||
| 26 | |||
| 27 | do_rootfs[depends] += "${RPMROOTFSDEPENDS}" | ||
| 28 | do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}" | ||
| 29 | |||
| 30 | do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa" | ||
| 31 | do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS" | ||
| 32 | |||
| 33 | python () { | ||
| 34 | if d.getVar('BUILD_IMAGES_FROM_FEEDS'): | ||
| 35 | flags = d.getVarFlag('do_rootfs', 'recrdeptask') | ||
| 36 | flags = flags.replace("do_package_write_rpm", "") | ||
| 37 | flags = flags.replace("do_deploy", "") | ||
| 38 | flags = flags.replace("do_populate_sysroot", "") | ||
| 39 | d.setVarFlag('do_rootfs', 'recrdeptask', flags) | ||
| 40 | d.setVar('RPM_PREPROCESS_COMMANDS', '') | ||
| 41 | d.setVar('RPM_POSTPROCESS_COMMANDS', '') | ||
| 42 | |||
| 43 | } | ||
diff --git a/meta/classes-recipe/rootfsdebugfiles.bbclass b/meta/classes-recipe/rootfsdebugfiles.bbclass deleted file mode 100644 index 4c2fc1de25..0000000000 --- a/meta/classes-recipe/rootfsdebugfiles.bbclass +++ /dev/null | |||
| @@ -1,47 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class installs additional files found on the build host | ||
| 8 | # directly into the rootfs. | ||
| 9 | # | ||
| 10 | # One use case is to install a constant ssh host key in | ||
| 11 | # an image that gets created for just one machine. This | ||
| 12 | # solves two issues: | ||
| 13 | # - host key generation on the device can stall when the | ||
| 14 | # kernel has not gathered enough entropy yet (seen in practice | ||
| 15 | # under qemu) | ||
| 16 | # - ssh complains by default when the host key changes | ||
| 17 | # | ||
| 18 | # For dropbear, with the ssh host key stored alongside the local.conf: | ||
| 19 | # 1. Extend local.conf: | ||
| 20 | # INHERIT += "rootfsdebugfiles" | ||
| 21 | # ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;" | ||
| 22 | # 2. Boot the image once, copy the dropbear_rsa_host_key from | ||
| 23 | # the device into your build conf directory. | ||
| 24 | # 3. An optional parameter can be used to set the file mode | ||
| 25 | # of the copied target, for instance: | ||
| 26 | # ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;" | ||
| 27 | # in case it is required to have a specific mode (it shouldn't be too open, for example). | ||
| 28 | # | ||
| 29 | # Do not use for production images! It bypasses several | ||
| 30 | # core build mechanisms (updating the image when one | ||
| 31 | # of the files changes, license tracking in the image | ||
| 32 | # manifest, ...). | ||
| 33 | |||
| 34 | ROOTFS_DEBUG_FILES ?= "" | ||
| 35 | ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'" | ||
| 36 | |||
| 37 | ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files" | ||
| 38 | rootfs_debug_files () { | ||
| 39 | #!/bin/sh -e | ||
| 40 | echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do | ||
| 41 | if [ -e "$source" ]; then | ||
| 42 | mkdir -p $(dirname $target) | ||
| 43 | cp -a $source $target | ||
| 44 | [ -n "$mode" ] && chmod $mode $target | ||
| 45 | fi | ||
| 46 | done | ||
| 47 | } | ||
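The entry format is semicolon-separated 'source target [mode]' triples; a standalone sketch of the same splitting the sed/read pipeline performs, using hypothetical paths:

    entries = "/host/key /rootfs/etc/dropbear/key 0600; /host/cfg /rootfs/etc/cfg"
    for entry in entries.split(';'):
        fields = entry.split()
        if not fields:
            continue
        source, target = fields[0], fields[1]
        mode = fields[2] if len(fields) > 2 else None  # mode is optional
        print(source, '->', target, mode or '(default mode)')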
diff --git a/meta/classes-recipe/rust-common.bbclass b/meta/classes-recipe/rust-common.bbclass deleted file mode 100644 index 31331c7a26..0000000000 --- a/meta/classes-recipe/rust-common.bbclass +++ /dev/null | |||
| @@ -1,196 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python3native | ||
| 8 | inherit rust-target-config | ||
| 9 | |||
| 10 | # Common variables used by all Rust builds | ||
| 11 | export rustlibdir = "${libdir}/rustlib/${RUST_HOST_SYS}/lib" | ||
| 12 | FILES:${PN} += "${rustlibdir}/*.so" | ||
| 13 | FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta" | ||
| 14 | FILES:${PN}-dbg += "${rustlibdir}/.debug" | ||
| 15 | |||
| 16 | RUSTLIB ?= "-L ${STAGING_DIR_HOST}${rustlibdir}" | ||
| 17 | RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=${TARGET_DBGSRC_DIR}" | ||
| 18 | RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}" | ||
| 19 | RUSTLIB_DEP ??= "libstd-rs" | ||
| 20 | RUST_PANIC_STRATEGY ??= "unwind" | ||
| 21 | |||
| 22 | def target_is_armv7(d): | ||
| 23 | '''Determine if target is armv7''' | ||
| 24 | # TUNE_FEATURES may include arm* even if the target is not arm | ||
| 25 | # in the case of *-native packages | ||
| 26 | if d.getVar('TARGET_ARCH') != 'arm': | ||
| 27 | return False | ||
| 28 | |||
| 29 | feat = d.getVar('TUNE_FEATURES') | ||
| 30 | feat = frozenset(feat.split()) | ||
| 31 | mach_overrides = d.getVar('MACHINEOVERRIDES') | ||
| 32 | mach_overrides = frozenset(mach_overrides.split(':')) | ||
| 33 | |||
| 34 | v7=frozenset(['armv7a', 'armv7r', 'armv7m', 'armv7ve']) | ||
| 35 | if mach_overrides.isdisjoint(v7) and feat.isdisjoint(v7): | ||
| 36 | return False | ||
| 37 | else: | ||
| 38 | return True | ||
| 39 | target_is_armv7[vardepvalue] = "${@target_is_armv7(d)}" | ||
| 40 | |||
| 41 | # Responsible for taking Yocto triples and converting them to Rust triples | ||
| 42 | def rust_base_triple(d, thing): | ||
| 43 | ''' | ||
| 44 | Mangle bitbake's *_SYS into something that rust might support (see | ||
| 45 | rust/mk/cfg/* for a list) | ||
| 46 | |||
| 47 | Note that os is assumed to be some linux form | ||
| 48 | ''' | ||
| 49 | |||
| 50 | # The llvm-target for armv7 is armv7-unknown-linux-gnueabihf | ||
| 51 | if d.getVar('{}_ARCH'.format(thing)) == d.getVar('TARGET_ARCH') and target_is_armv7(d): | ||
| 52 | arch = "armv7" | ||
| 53 | else: | ||
| 54 | arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing))) | ||
| 55 | |||
| 56 | # Substituting "unknown" when vendor is empty will match rust's standard | ||
| 57 | # targets when building native recipes (including rust-native itself) | ||
| 58 | vendor = d.getVar('{}_VENDOR'.format(thing)) or "-unknown" | ||
| 59 | |||
| 60 | # Default to glibc | ||
| 61 | libc = "-gnu" | ||
| 62 | os = d.getVar('{}_OS'.format(thing)) | ||
| 63 | # This catches ARM targets and appends the necessary hard float bits | ||
| 64 | if os == "linux-gnueabi" or os == "linux-musleabi": | ||
| 65 | libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d) | ||
| 66 | elif os == "linux-gnux32" or os == "linux-muslx32": | ||
| 67 | libc = "" | ||
| 68 | elif "musl" in os: | ||
| 69 | libc = "-musl" | ||
| 70 | os = "linux" | ||
| 71 | elif "elf" in os: | ||
| 72 | libc = "-elf" | ||
| 73 | os = "none" | ||
| 74 | elif "eabi" in os: | ||
| 75 | libc = "-eabi" | ||
| 76 | os = "none" | ||
| 77 | |||
| 78 | return arch + vendor + '-' + os + libc | ||
| 79 | |||
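A condensed, standalone restatement of the mapping above (glibc, musl and eabi cases only; the vendor value is hypothetical), with a few worked examples:

    def base_triple(arch, vendor, os_, hardfloat=False):
        libc = '-gnu'
        if os_ in ('linux-gnueabi', 'linux-musleabi'):
            # ARM EABI targets append the hard-float suffix instead of a libc suffix
            libc = 'hf' if hardfloat else ''
        elif 'musl' in os_:
            libc, os_ = '-musl', 'linux'
        return arch + vendor + '-' + os_ + libc

    print(base_triple('aarch64', '-poky', 'linux'))                      # aarch64-poky-linux-gnu
    print(base_triple('arm', '-poky', 'linux-gnueabi', hardfloat=True))  # arm-poky-linux-gnueabihf
    print(base_triple('x86_64', '-poky', 'linux-musl'))                  # x86_64-poky-linux-musl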
| 80 | |||
| 81 | # In some cases uname and the toolchain differ on their idea of the arch name | ||
| 82 | RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}" | ||
| 83 | |||
| 84 | # Naming explanation | ||
| 85 | # Yocto | ||
| 86 | # - BUILD_SYS - Yocto triple of the build environment | ||
| 87 | # - HOST_SYS - What we're building for in Yocto | ||
| 88 | # - TARGET_SYS - What we're building for in Yocto | ||
| 89 | # | ||
| 90 | # So when building '-native' packages BUILD_SYS == HOST_SYS == TARGET_SYS | ||
| 91 | # When building packages for the image HOST_SYS == TARGET_SYS | ||
| 92 | # This is a gross oversimplification, as there are other modes, but | ||
| 93 | # currently this is all that's supported. | ||
| 94 | # | ||
| 95 | # Rust | ||
| 96 | # - TARGET - the system where the binary will run | ||
| 97 | # - HOST - the system where the binary is being built | ||
| 98 | # | ||
| 99 | # Rust additionally will use two additional cases: | ||
| 100 | # - undecorated (e.g. CC) - equivalent to TARGET | ||
| 101 | # - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both | ||
| 102 | # see: https://github.com/rust-lang/cc-rs | ||
| 103 | # Given the way Rust's internal triples and Yocto triples are mapped together, | ||
| 104 | # it's likely best not to use the triple suffix due to potential confusion. | ||
| 105 | |||
| 106 | RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}" | ||
| 107 | RUST_BUILD_SYS[vardepvalue] = "${RUST_BUILD_SYS}" | ||
| 108 | RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}" | ||
| 109 | RUST_HOST_SYS[vardepvalue] = "${RUST_HOST_SYS}" | ||
| 110 | RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}" | ||
| 111 | RUST_TARGET_SYS[vardepvalue] = "${RUST_TARGET_SYS}" | ||
| 112 | |||
| 113 | # wrappers to get around the fact that Rust needs a single | ||
| 114 | # binary but Yocto's compiler and linker commands have | ||
| 115 | # arguments. Technically the archiver is always one command but | ||
| 116 | # this is necessary for builds that determine the prefix and then | ||
| 117 | # use those commands based on the prefix. | ||
| 118 | WRAPPER_DIR = "${WORKDIR}/wrapper" | ||
| 119 | RUST_BUILD_CC = "${WRAPPER_DIR}/build-rust-cc" | ||
| 120 | RUST_BUILD_CXX = "${WRAPPER_DIR}/build-rust-cxx" | ||
| 121 | RUST_BUILD_CCLD = "${WRAPPER_DIR}/build-rust-ccld" | ||
| 122 | RUST_BUILD_AR = "${WRAPPER_DIR}/build-rust-ar" | ||
| 123 | RUST_TARGET_CC = "${WRAPPER_DIR}/target-rust-cc" | ||
| 124 | RUST_TARGET_CXX = "${WRAPPER_DIR}/target-rust-cxx" | ||
| 125 | RUST_TARGET_CCLD = "${WRAPPER_DIR}/target-rust-ccld" | ||
| 126 | RUST_TARGET_AR = "${WRAPPER_DIR}/target-rust-ar" | ||
| 127 | |||
| 128 | create_wrapper_rust () { | ||
| 129 | file="$1" | ||
| 130 | shift | ||
| 131 | extras="$1" | ||
| 132 | shift | ||
| 133 | crate_cc_extras="$1" | ||
| 134 | shift | ||
| 135 | |||
| 136 | cat <<- EOF > "${file}" | ||
| 137 | #!/usr/bin/env python3 | ||
| 138 | import os, sys | ||
| 139 | orig_binary = "$@" | ||
| 140 | extras = "${extras}" | ||
| 141 | |||
| 142 | # Apply a required subset of CC crate compiler flags | ||
| 143 | # when we build a target recipe for a non-bare-metal target. | ||
| 144 | # https://github.com/rust-lang/cc-rs/blob/main/src/lib.rs#L1614 | ||
| 145 | if "CRATE_CC_NO_DEFAULTS" in os.environ.keys() and \ | ||
| 146 | "TARGET" in os.environ.keys() and not "-none-" in os.environ["TARGET"]: | ||
| 147 | orig_binary += "${crate_cc_extras}" | ||
| 148 | |||
| 149 | binary = orig_binary.split()[0] | ||
| 150 | args = orig_binary.split() + sys.argv[1:] | ||
| 151 | if extras: | ||
| 152 | args.append(extras) | ||
| 153 | os.execvp(binary, args) | ||
| 154 | EOF | ||
| 155 | chmod +x "${file}" | ||
| 156 | } | ||
| 157 | |||
| 158 | WRAPPER_TARGET_CC = "${CC}" | ||
| 159 | WRAPPER_TARGET_CXX = "${CXX}" | ||
| 160 | WRAPPER_TARGET_CCLD = "${CCLD}" | ||
| 161 | WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}" | ||
| 162 | WRAPPER_TARGET_EXTRALD = "" | ||
| 163 | # see recipes-devtools/gcc/gcc/0018-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch | ||
| 164 | # we need to link with ssp_nonshared on musl to avoid "undefined reference to `__stack_chk_fail_local'" | ||
| 165 | # when building MACHINE=qemux86 for musl | ||
| 166 | WRAPPER_TARGET_EXTRALD:libc-musl = "-lssp_nonshared" | ||
| 167 | WRAPPER_TARGET_AR = "${AR}" | ||
| 168 | |||
| 169 | # compiler is used by gcc-rs | ||
| 170 | # linker is used by rustc/cargo | ||
| 171 | # archiver is used by the build of libstd-rs | ||
| 172 | do_rust_create_wrappers () { | ||
| 173 | mkdir -p "${WRAPPER_DIR}" | ||
| 174 | |||
| 175 | # Yocto Build / Rust Host C compiler | ||
| 176 | create_wrapper_rust "${RUST_BUILD_CC}" "" "${CRATE_CC_FLAGS}" "${BUILD_CC}" "${BUILD_LDFLAGS}" | ||
| 177 | # Yocto Build / Rust Host C++ compiler | ||
| 178 | create_wrapper_rust "${RUST_BUILD_CXX}" "" "${CRATE_CC_FLAGS}" "${BUILD_CXX}" | ||
| 179 | # Yocto Build / Rust Host linker | ||
| 180 | create_wrapper_rust "${RUST_BUILD_CCLD}" "" "" "${BUILD_CCLD}" "${BUILD_LDFLAGS}" | ||
| 181 | # Yocto Build / Rust Host archiver | ||
| 182 | create_wrapper_rust "${RUST_BUILD_AR}" "" "" "${BUILD_AR}" | ||
| 183 | |||
| 184 | # Yocto Target / Rust Target C compiler | ||
| 185 | create_wrapper_rust "${RUST_TARGET_CC}" "${WRAPPER_TARGET_EXTRALD}" "${CRATE_CC_FLAGS}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}" | ||
| 186 | # Yocto Target / Rust Target C++ compiler | ||
| 187 | create_wrapper_rust "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_EXTRALD}" "${CRATE_CC_FLAGS}" "${WRAPPER_TARGET_CXX}" "${CXXFLAGS}" | ||
| 188 | # Yocto Target / Rust Target linker | ||
| 189 | create_wrapper_rust "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_EXTRALD}" "" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}" | ||
| 190 | # Yocto Target / Rust Target archiver | ||
| 191 | create_wrapper_rust "${RUST_TARGET_AR}" "" "" "${WRAPPER_TARGET_AR}" | ||
| 192 | |||
| 193 | } | ||
| 194 | |||
| 195 | addtask rust_create_wrappers before do_configure after do_patch do_prepare_recipe_sysroot | ||
| 196 | do_rust_create_wrappers[dirs] += "${WRAPPER_DIR}" | ||
diff --git a/meta/classes-recipe/rust-target-config.bbclass b/meta/classes-recipe/rust-target-config.bbclass deleted file mode 100644 index 9ce57843cf..0000000000 --- a/meta/classes-recipe/rust-target-config.bbclass +++ /dev/null | |||
| @@ -1,468 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Right now this is focused on arm-specific tune features. | ||
| 8 | # We get away with this for now as one can only use x86-64 as the build host | ||
| 9 | # (not arm). | ||
| 10 | # Note that TUNE_FEATURES is _always_ referring to the target, so we really | ||
| 11 | # don't want to use this for the host/build. | ||
| 12 | def llvm_features_from_tune(d): | ||
| 13 | f = [] | ||
| 14 | feat = d.getVar('TUNE_FEATURES') | ||
| 15 | if not feat: | ||
| 16 | return [] | ||
| 17 | feat = frozenset(feat.split()) | ||
| 18 | |||
| 19 | mach_overrides = d.getVar('MACHINEOVERRIDES') | ||
| 20 | mach_overrides = frozenset(mach_overrides.split(':')) | ||
| 21 | |||
| 22 | if 'vfpv4' in feat: | ||
| 23 | f.append("+vfp4") | ||
| 24 | elif 'vfpv4d16' in feat: | ||
| 25 | f.append("+vfp4") | ||
| 26 | f.append("-d32") | ||
| 27 | elif 'vfpv3' in feat: | ||
| 28 | f.append("+vfp3") | ||
| 29 | elif 'vfpv3d16' in feat: | ||
| 30 | f.append("+vfp3") | ||
| 31 | f.append("-d32") | ||
| 32 | elif 'vfpv2' in feat or 'vfp' in feat: | ||
| 33 | f.append("+vfp2") | ||
| 34 | |||
| 35 | if 'neon' in feat: | ||
| 36 | f.append("+neon") | ||
| 37 | elif target_is_armv7(d): | ||
| 38 | f.append("-neon") | ||
| 39 | |||
| 40 | if 'mips32' in feat: | ||
| 41 | f.append("+mips32") | ||
| 42 | |||
| 43 | if 'mips32r2' in feat: | ||
| 44 | f.append("+mips32r2") | ||
| 45 | |||
| 46 | if target_is_armv7(d): | ||
| 47 | f.append('+v7') | ||
| 48 | |||
| 49 | if ('armv6' in mach_overrides) or ('armv6' in feat): | ||
| 50 | f.append("+v6") | ||
| 51 | if 'armv5te' in feat: | ||
| 52 | f.append("+strict-align") | ||
| 53 | f.append("+v5te") | ||
| 54 | elif 'armv5' in feat: | ||
| 55 | f.append("+strict-align") | ||
| 56 | f.append("+v5") | ||
| 57 | |||
| 58 | if ('armv4' in mach_overrides) or ('armv4' in feat): | ||
| 59 | f.append("+strict-align") | ||
| 60 | |||
| 61 | if 'dsp' in feat: | ||
| 62 | f.append("+dsp") | ||
| 63 | |||
| 64 | if 'thumb' in feat: | ||
| 65 | if d.getVar('ARM_THUMB_OPT') == "thumb": | ||
| 66 | if target_is_armv7(d): | ||
| 67 | f.append('+thumb2') | ||
| 68 | f.append("+thumb-mode") | ||
| 69 | |||
| 70 | if 'cortexa5' in feat: | ||
| 71 | f.append("+a5") | ||
| 72 | if 'cortexa7' in feat: | ||
| 73 | f.append("+a7") | ||
| 74 | if 'cortexa9' in feat: | ||
| 75 | f.append("+a9") | ||
| 76 | if 'cortexa15' in feat: | ||
| 77 | f.append("+a15") | ||
| 78 | if 'cortexa17' in feat: | ||
| 79 | f.append("+a17") | ||
| 80 | if 'rv' in feat: | ||
| 81 | if 'm' in feat: | ||
| 82 | f.append("+m") | ||
| 83 | if 'a' in feat: | ||
| 84 | f.append("+a") | ||
| 85 | if 'f' in feat: | ||
| 86 | f.append("+f") | ||
| 87 | if 'd' in feat: | ||
| 88 | f.append("+d") | ||
| 89 | if 'c' in feat: | ||
| 90 | f.append("+c") | ||
| 91 | if 'v' in feat: | ||
| 92 | f.append("+v") | ||
| 93 | if 'zicbom' in feat: | ||
| 94 | f.append("+zicbom") | ||
| 95 | if 'zicsr' in feat: | ||
| 96 | f.append("+zicsr") | ||
| 97 | if 'zifencei' in feat: | ||
| 98 | f.append("+zifencei") | ||
| 99 | if 'zba' in feat: | ||
| 100 | f.append("+zba") | ||
| 101 | if 'zbb' in feat: | ||
| 102 | f.append("+zbb") | ||
| 103 | if 'zbc' in feat: | ||
| 104 | f.append("+zbc") | ||
| 105 | if 'zbs' in feat: | ||
| 106 | f.append("+zbs") | ||
| 107 | return f | ||
| 108 | llvm_features_from_tune[vardepvalue] = "${@llvm_features_from_tune(d)}" | ||
| 109 | |||
| 110 | # TARGET_CC_ARCH changes from build/cross/target so it'll do the right thing | ||
| 111 | # this should go away when https://github.com/rust-lang/rust/pull/31709 is | ||
| 112 | # stable (1.9.0?) | ||
| 113 | def llvm_features_from_cc_arch(d): | ||
| 114 | f = [] | ||
| 115 | feat = d.getVar('TARGET_CC_ARCH') | ||
| 116 | if not feat: | ||
| 117 | return [] | ||
| 118 | feat = frozenset(feat.split()) | ||
| 119 | |||
| 120 | if '-mmmx' in feat: | ||
| 121 | f.append("+mmx") | ||
| 122 | if '-msse' in feat: | ||
| 123 | f.append("+sse") | ||
| 124 | if '-msse2' in feat: | ||
| 125 | f.append("+sse2") | ||
| 126 | if '-msse3' in feat: | ||
| 127 | f.append("+sse3") | ||
| 128 | if '-mssse3' in feat: | ||
| 129 | f.append("+ssse3") | ||
| 130 | if '-msse4.1' in feat: | ||
| 131 | f.append("+sse4.1") | ||
| 132 | if '-msse4.2' in feat: | ||
| 133 | f.append("+sse4.2") | ||
| 134 | if '-msse4a' in feat: | ||
| 135 | f.append("+sse4a") | ||
| 136 | if '-mavx' in feat: | ||
| 137 | f.append("+avx") | ||
| 138 | if '-mavx2' in feat: | ||
| 139 | f.append("+avx2") | ||
| 140 | |||
| 141 | return f | ||
| 142 | |||
| 143 | def llvm_features_from_target_fpu(d): | ||
| 144 | # TARGET_FPU can be hard or soft. +soft-float tells llvm to use the soft float | ||
| 145 | # ABI. There is no option for hard. | ||
| 146 | |||
| 147 | fpu = d.getVar('TARGET_FPU') | ||
| 148 | return ["+soft-float"] if fpu == "soft" else [] | ||
| 149 | |||
| 150 | def llvm_features(d): | ||
| 151 | return ','.join(llvm_features_from_tune(d) + | ||
| 152 | llvm_features_from_cc_arch(d) + | ||
| 153 | llvm_features_from_target_fpu(d)) | ||
| 154 | |||
| 155 | llvm_features[vardepvalue] = "${@llvm_features(d)}" | ||
| 156 | |||
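As a worked example (hypothetical tune): with TARGET_ARCH = "arm", TUNE_FEATURES = "armv7a vfpv4 neon dsp thumb", ARM_THUMB_OPT = "thumb" and TARGET_FPU = "soft", llvm_features(d) evaluates to roughly "+vfp4,+neon,+v7,+dsp,+thumb2,+thumb-mode,+soft-float".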
| 157 | ## arm-unknown-linux-gnueabihf | ||
| 158 | DATA_LAYOUT[arm-eabi] = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" | ||
| 159 | TARGET_ENDIAN[arm-eabi] = "little" | ||
| 160 | TARGET_POINTER_WIDTH[arm-eabi] = "32" | ||
| 161 | TARGET_C_INT_WIDTH[arm-eabi] = "32" | ||
| 162 | MAX_ATOMIC_WIDTH[arm-eabi] = "64" | ||
| 163 | FEATURES[arm-eabi] = "+v6,+vfp2" | ||
| 164 | |||
| 165 | ## armv7-unknown-linux-gnueabihf | ||
| 166 | DATA_LAYOUT[armv7-eabi] = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" | ||
| 167 | TARGET_ENDIAN[armv7-eabi] = "little" | ||
| 168 | TARGET_POINTER_WIDTH[armv7-eabi] = "32" | ||
| 169 | TARGET_C_INT_WIDTH[armv7-eabi] = "32" | ||
| 170 | MAX_ATOMIC_WIDTH[armv7-eabi] = "64" | ||
| 171 | FEATURES[armv7-eabi] = "+v7,+vfp2,+thumb2" | ||
| 172 | |||
| 173 | ## aarch64-unknown-linux-{gnu, musl} | ||
| 174 | DATA_LAYOUT[aarch64] = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32" | ||
| 175 | TARGET_ENDIAN[aarch64] = "little" | ||
| 176 | TARGET_POINTER_WIDTH[aarch64] = "64" | ||
| 177 | TARGET_C_INT_WIDTH[aarch64] = "32" | ||
| 178 | MAX_ATOMIC_WIDTH[aarch64] = "128" | ||
| 179 | |||
| 180 | ## x86_64-unknown-linux-{gnu, musl} | ||
| 181 | DATA_LAYOUT[x86_64] = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" | ||
| 182 | TARGET_ENDIAN[x86_64] = "little" | ||
| 183 | TARGET_POINTER_WIDTH[x86_64] = "64" | ||
| 184 | TARGET_C_INT_WIDTH[x86_64] = "32" | ||
| 185 | MAX_ATOMIC_WIDTH[x86_64] = "64" | ||
| 186 | |||
| 187 | ## x86_64-unknown-linux-gnux32 | ||
| 188 | DATA_LAYOUT[x86_64-x32] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" | ||
| 189 | TARGET_ENDIAN[x86_64-x32] = "little" | ||
| 190 | TARGET_POINTER_WIDTH[x86_64-x32] = "32" | ||
| 191 | TARGET_C_INT_WIDTH[x86_64-x32] = "32" | ||
| 192 | MAX_ATOMIC_WIDTH[x86_64-x32] = "64" | ||
| 193 | |||
| 194 | ## i686-unknown-linux-{gnu, musl} | ||
| 195 | DATA_LAYOUT[i686] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128" | ||
| 196 | TARGET_ENDIAN[i686] = "little" | ||
| 197 | TARGET_POINTER_WIDTH[i686] = "32" | ||
| 198 | TARGET_C_INT_WIDTH[i686] = "32" | ||
| 199 | MAX_ATOMIC_WIDTH[i686] = "64" | ||
| 200 | |||
| 201 | ## XXX: a bit of a hack so qemux86 builds, clone of i686-unknown-linux-{gnu, musl} above | ||
| 202 | DATA_LAYOUT[i586] = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128" | ||
| 203 | TARGET_ENDIAN[i586] = "little" | ||
| 204 | TARGET_POINTER_WIDTH[i586] = "32" | ||
| 205 | TARGET_C_INT_WIDTH[i586] = "32" | ||
| 206 | MAX_ATOMIC_WIDTH[i586] = "64" | ||
| 207 | |||
| 208 | ## mips-unknown-linux-{gnu, musl} | ||
| 209 | DATA_LAYOUT[mips] = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64" | ||
| 210 | TARGET_ENDIAN[mips] = "big" | ||
| 211 | TARGET_POINTER_WIDTH[mips] = "32" | ||
| 212 | TARGET_C_INT_WIDTH[mips] = "32" | ||
| 213 | MAX_ATOMIC_WIDTH[mips] = "32" | ||
| 214 | |||
| 215 | ## mipsel-unknown-linux-{gnu, musl} | ||
| 216 | DATA_LAYOUT[mipsel] = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64" | ||
| 217 | TARGET_ENDIAN[mipsel] = "little" | ||
| 218 | TARGET_POINTER_WIDTH[mipsel] = "32" | ||
| 219 | TARGET_C_INT_WIDTH[mipsel] = "32" | ||
| 220 | MAX_ATOMIC_WIDTH[mipsel] = "32" | ||
| 221 | |||
| 222 | ## mips64-unknown-linux-{gnu, musl} | ||
| 223 | DATA_LAYOUT[mips64] = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128" | ||
| 224 | TARGET_ENDIAN[mips64] = "big" | ||
| 225 | TARGET_POINTER_WIDTH[mips64] = "64" | ||
| 226 | TARGET_C_INT_WIDTH[mips64] = "32" | ||
| 227 | MAX_ATOMIC_WIDTH[mips64] = "64" | ||
| 228 | |||
| 229 | ## mips64-n32-unknown-linux-{gnu, musl} | ||
| 230 | DATA_LAYOUT[mips64-n32] = "E-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32:64-S128" | ||
| 231 | TARGET_ENDIAN[mips64-n32] = "big" | ||
| 232 | TARGET_POINTER_WIDTH[mips64-n32] = "32" | ||
| 233 | TARGET_C_INT_WIDTH[mips64-n32] = "32" | ||
| 234 | MAX_ATOMIC_WIDTH[mips64-n32] = "64" | ||
| 235 | |||
| 236 | ## mips64el-unknown-linux-{gnu, musl} | ||
| 237 | DATA_LAYOUT[mips64el] = "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128" | ||
| 238 | TARGET_ENDIAN[mips64el] = "little" | ||
| 239 | TARGET_POINTER_WIDTH[mips64el] = "64" | ||
| 240 | TARGET_C_INT_WIDTH[mips64el] = "32" | ||
| 241 | MAX_ATOMIC_WIDTH[mips64el] = "64" | ||
| 242 | |||
| 243 | ## powerpc-unknown-linux-{gnu, musl} | ||
| 244 | DATA_LAYOUT[powerpc] = "E-m:e-p:32:32-Fn32-i64:64-n32" | ||
| 245 | TARGET_ENDIAN[powerpc] = "big" | ||
| 246 | TARGET_POINTER_WIDTH[powerpc] = "32" | ||
| 247 | TARGET_C_INT_WIDTH[powerpc] = "32" | ||
| 248 | MAX_ATOMIC_WIDTH[powerpc] = "32" | ||
| 249 | |||
| 250 | ## powerpc64-unknown-linux-{gnu, musl} | ||
| 251 | DATA_LAYOUT[powerpc64] = "E-m:e-Fi64-i64:64-i128:128-n32:64-S128-v256:256:256-v512:512:512" | ||
| 252 | TARGET_ENDIAN[powerpc64] = "big" | ||
| 253 | TARGET_POINTER_WIDTH[powerpc64] = "64" | ||
| 254 | TARGET_C_INT_WIDTH[powerpc64] = "32" | ||
| 255 | MAX_ATOMIC_WIDTH[powerpc64] = "64" | ||
| 256 | |||
| 257 | ## powerpc64le-unknown-linux-{gnu, musl} | ||
| 258 | DATA_LAYOUT[powerpc64le] = "e-m:e-Fn32-i64:64-i128:128-n32:64-S128-v256:256:256-v512:512:512" | ||
| 259 | TARGET_ENDIAN[powerpc64le] = "little" | ||
| 260 | TARGET_POINTER_WIDTH[powerpc64le] = "64" | ||
| 261 | TARGET_C_INT_WIDTH[powerpc64le] = "32" | ||
| 262 | MAX_ATOMIC_WIDTH[powerpc64le] = "64" | ||
| 263 | |||
| 264 | ## riscv32-unknown-linux-{gnu, musl} | ||
| 265 | DATA_LAYOUT[riscv32] = "e-m:e-p:32:32-i64:64-n32-S128" | ||
| 266 | TARGET_ENDIAN[riscv32] = "little" | ||
| 267 | TARGET_POINTER_WIDTH[riscv32] = "32" | ||
| 268 | TARGET_C_INT_WIDTH[riscv32] = "32" | ||
| 269 | MAX_ATOMIC_WIDTH[riscv32] = "32" | ||
| 270 | |||
| 271 | ## riscv64-unknown-linux-{gnu, musl} | ||
| 272 | DATA_LAYOUT[riscv64] = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128" | ||
| 273 | TARGET_ENDIAN[riscv64] = "little" | ||
| 274 | TARGET_POINTER_WIDTH[riscv64] = "64" | ||
| 275 | TARGET_C_INT_WIDTH[riscv64] = "32" | ||
| 276 | MAX_ATOMIC_WIDTH[riscv64] = "64" | ||
| 277 | |||
| 278 | ## loongarch64-unknown-linux-{gnu, musl} | ||
| 279 | DATA_LAYOUT[loongarch64] = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128" | ||
| 280 | TARGET_ENDIAN[loongarch64] = "little" | ||
| 281 | TARGET_POINTER_WIDTH[loongarch64] = "64" | ||
| 282 | TARGET_C_INT_WIDTH[loongarch64] = "32" | ||
| 283 | MAX_ATOMIC_WIDTH[loongarch64] = "64" | ||
| 284 | FEATURES[loongarch64] = "+d" | ||
| 285 | |||
| 286 | # Convert a normal arch (HOST_ARCH, TARGET_ARCH, BUILD_ARCH, etc) to something | ||
| 287 | # rust's internals won't choke on. | ||
| 288 | def arch_to_rust_target_arch(arch): | ||
| 289 | if arch == "i586" or arch == "i686": | ||
| 290 | return "x86" | ||
| 291 | elif arch == "mipsel": | ||
| 292 | return "mips" | ||
| 293 | elif arch == "mip64sel": | ||
| 294 | return "mips64" | ||
| 295 | elif arch == "armv7": | ||
| 296 | return "arm" | ||
| 297 | elif arch == "powerpc64le": | ||
| 298 | return "powerpc64" | ||
| 299 | else: | ||
| 300 | return arch | ||
| 301 | |||
| 302 | # Convert a rust target string to an llvm-compatible triplet | ||
| 303 | def rust_sys_to_llvm_target(sys): | ||
| 304 | return sys | ||
| 305 | |||
| 306 | # generates our target CPU value | ||
| 307 | def llvm_cpu(d): | ||
| 308 | cpu = d.getVar('PACKAGE_ARCH') | ||
| 309 | target = d.getVar('TRANSLATED_TARGET_ARCH') | ||
| 310 | |||
| 311 | trans = {} | ||
| 312 | trans['corei7-64'] = "corei7" | ||
| 313 | trans['core2-32'] = "core2" | ||
| 314 | trans['x86-64'] = "x86-64" | ||
| 315 | trans['i686'] = "i686" | ||
| 316 | trans['i586'] = "i586" | ||
| 317 | trans['mips64'] = "mips64" | ||
| 318 | trans['mips64el'] = "mips64" | ||
| 319 | trans['powerpc64le'] = "ppc64le" | ||
| 320 | trans['powerpc64'] = "ppc64" | ||
| 321 | trans['riscv64'] = "generic-rv64" | ||
| 322 | trans['riscv32'] = "generic-rv32" | ||
| 323 | trans['loongarch64'] = "la464" | ||
| 324 | |||
| 325 | if target in ["mips", "mipsel", "powerpc"]: | ||
| 326 | feat = frozenset(d.getVar('TUNE_FEATURES').split()) | ||
| 327 | if "mips32r2" in feat: | ||
| 328 | trans['mipsel'] = "mips32r2" | ||
| 329 | trans['mips'] = "mips32r2" | ||
| 330 | elif "mips32" in feat: | ||
| 331 | trans['mipsel'] = "mips32" | ||
| 332 | trans['mips'] = "mips32" | ||
| 333 | elif "ppc7400" in feat: | ||
| 334 | trans['powerpc'] = "7400" | ||
| 335 | |||
| 336 | try: | ||
| 337 | return trans[cpu] | ||
| 338 | except KeyError: | ||
| 339 | return trans.get(target, "generic") | ||
| 340 | |||
| 341 | llvm_cpu[vardepvalue] = "${@llvm_cpu(d)}" | ||
| 342 | |||
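For example (hypothetical values): PACKAGE_ARCH = "corei7-64" translates to "corei7"; a mips machine with "mips32r2" in TUNE_FEATURES yields "mips32r2" via the fallback on TRANSLATED_TARGET_ARCH; an arch with no entry in the table falls back to "generic".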
| 343 | def rust_gen_target(d, thing, wd, arch): | ||
| 344 | import json | ||
| 345 | |||
| 346 | build_sys = d.getVar('BUILD_SYS') | ||
| 347 | target_sys = d.getVar('TARGET_SYS') | ||
| 348 | |||
| 349 | sys = d.getVar('{}_SYS'.format(thing)) | ||
| 350 | prefix = d.getVar('{}_PREFIX'.format(thing)) | ||
| 351 | rustsys = d.getVar('RUST_{}_SYS'.format(thing)) | ||
| 352 | os = d.getVar('{}_OS'.format(thing)) | ||
| 353 | |||
| 354 | abi = None | ||
| 355 | cpu = "generic" | ||
| 356 | features = "" | ||
| 357 | |||
| 358 | # Need to apply the target tuning consistently, only if the triplet applies to the target | ||
| 359 | # and not in the native case | ||
| 360 | if sys == target_sys and sys != build_sys: | ||
| 361 | abi = d.getVar('ABIEXTENSION') | ||
| 362 | cpu = llvm_cpu(d) | ||
| 363 | if bb.data.inherits_class('native', d): | ||
| 364 | features = ','.join(llvm_features_from_cc_arch(d)) | ||
| 365 | else: | ||
| 366 | features = llvm_features(d) or "" | ||
| 367 | # arm and armv7 have different targets in llvm | ||
| 368 | if arch == "arm" and target_is_armv7(d): | ||
| 369 | arch = 'armv7' | ||
| 370 | |||
| 371 | rust_arch = oe.rust.arch_to_rust_arch(arch) | ||
| 372 | |||
| 373 | if abi: | ||
| 374 | arch_abi = "{}-{}".format(rust_arch, abi) | ||
| 375 | else: | ||
| 376 | arch_abi = rust_arch | ||
| 377 | |||
| 378 | features = features or d.getVarFlag('FEATURES', arch_abi) or "" | ||
| 379 | features = features.strip() | ||
| 380 | |||
| 381 | # build tspec | ||
| 382 | tspec = {} | ||
| 383 | tspec['llvm-target'] = rust_sys_to_llvm_target(rustsys) | ||
| 384 | tspec['data-layout'] = d.getVarFlag('DATA_LAYOUT', arch_abi) | ||
| 385 | if tspec['data-layout'] is None: | ||
| 386 | bb.fatal("No rust target defined for %s" % arch_abi) | ||
| 387 | tspec['max-atomic-width'] = int(d.getVarFlag('MAX_ATOMIC_WIDTH', arch_abi)) | ||
| 388 | tspec['target-pointer-width'] = d.getVarFlag('TARGET_POINTER_WIDTH', arch_abi) | ||
| 389 | tspec['target-c-int-width'] = int(d.getVarFlag('TARGET_C_INT_WIDTH', arch_abi)) | ||
| 390 | tspec['target-endian'] = d.getVarFlag('TARGET_ENDIAN', arch_abi) | ||
| 391 | tspec['arch'] = arch_to_rust_target_arch(rust_arch) | ||
| 392 | if "elf" in os: | ||
| 393 | tspec['os'] = "none" | ||
| 394 | else: | ||
| 395 | tspec['os'] = "linux" | ||
| 396 | if "musl" in tspec['llvm-target']: | ||
| 397 | tspec['env'] = "musl" | ||
| 398 | else: | ||
| 399 | tspec['env'] = "gnu" | ||
| 400 | if "riscv64" in tspec['llvm-target']: | ||
| 401 | tspec['llvm-abiname'] = d.getVar('TUNE_RISCV_ABI') | ||
| 402 | if "riscv32" in tspec['llvm-target']: | ||
| 403 | tspec['llvm-abiname'] = d.getVar('TUNE_RISCV_ABI') | ||
| 404 | if "loongarch64" in tspec['llvm-target']: | ||
| 405 | tspec['llvm-abiname'] = "lp64d" | ||
| 406 | if "powerpc64le" in tspec['llvm-target']: | ||
| 407 | tspec['llvm-abiname'] = "elfv2" | ||
| 408 | elif "powerpc64" in tspec['llvm-target']: | ||
| 409 | tspec['llvm-abiname'] = "elfv1" | ||
| 410 | tspec['vendor'] = "unknown" | ||
| 411 | tspec['target-family'] = "unix" | ||
| 412 | tspec['linker'] = "{}{}gcc".format(d.getVar('CCACHE'), prefix) | ||
| 413 | tspec['cpu'] = cpu | ||
| 414 | if features != "": | ||
| 415 | tspec['features'] = features | ||
| 416 | fpu = d.getVar('TARGET_FPU') | ||
| 417 | if fpu in ["soft", "softfp"]: | ||
| 418 | tspec['llvm-floatabi'] = "soft" | ||
| 419 | elif fpu == "hard": | ||
| 420 | tspec['llvm-floatabi'] = "hard" | ||
| 421 | tspec['dynamic-linking'] = True | ||
| 422 | tspec['executables'] = True | ||
| 423 | tspec['linker-is-gnu'] = True | ||
| 424 | tspec['linker-flavor'] = "gcc" | ||
| 425 | tspec['has-rpath'] = True | ||
| 426 | tspec['has-thread-local'] = True | ||
| 427 | tspec['position-independent-executables'] = True | ||
| 428 | tspec['panic-strategy'] = d.getVar("RUST_PANIC_STRATEGY") | ||
| 429 | |||
| 430 | # write out the target spec json file | ||
| 431 | with open(wd + rustsys + '.json', 'w') as f: | ||
| 432 | json.dump(tspec, f, indent=4) | ||
| 433 | |||
| 434 | # These are accounted for in tmpdir path names, so they don't need to be in the task signature | ||
| 435 | rust_gen_target[vardepsexclude] += "ABIEXTENSION llvm_cpu" | ||
| 436 | |||
| 437 | do_rust_gen_targets[vardeps] += "DATA_LAYOUT TARGET_ENDIAN TARGET_POINTER_WIDTH TARGET_C_INT_WIDTH MAX_ATOMIC_WIDTH FEATURES" | ||
| 438 | |||
| 439 | RUST_TARGETS_DIR = "${WORKDIR}/rust-targets/" | ||
| 440 | export RUST_TARGET_PATH = "${RUST_TARGETS_DIR}" | ||
| 441 | |||
| 442 | python do_rust_gen_targets () { | ||
| 443 | wd = d.getVar('RUST_TARGETS_DIR') | ||
| 444 | # The order of BUILD, HOST, TARGET matters in case the files overwrite each other; the most specific is written last | ||
| 445 | rust_gen_target(d, 'BUILD', wd, d.getVar('BUILD_ARCH')) | ||
| 446 | rust_gen_target(d, 'HOST', wd, d.getVar('HOST_ARCH')) | ||
| 447 | rust_gen_target(d, 'TARGET', wd, d.getVar('TARGET_ARCH')) | ||
| 448 | } | ||
| 449 | |||
| 450 | addtask rust_gen_targets after do_patch before do_configure | ||
| 451 | do_rust_gen_targets[dirs] += "${RUST_TARGETS_DIR}" | ||
| 452 | |||
| 453 | # For building target C dependencies, use only the compiler parameters defined in OE | ||
| 454 | # and ignore the CC crate defaults, which conflict with the OE ones in some cases. | ||
| 455 | # https://github.com/rust-lang/cc-rs#external-configuration-via-environment-variables | ||
| 456 | # Some CC crate compiler flags are still required; | ||
| 457 | # we apply them conditionally in the rust wrappers. | ||
| 458 | |||
| 459 | CRATE_CC_FLAGS:class-native = "" | ||
| 460 | CRATE_CC_FLAGS:class-nativesdk = "" | ||
| 461 | CRATE_CC_FLAGS:class-target = " -ffunction-sections -fdata-sections -fPIC" | ||
| 462 | |||
| 463 | do_compile:prepend:class-target() { | ||
| 464 | export CRATE_CC_NO_DEFAULTS=1 | ||
| 465 | } | ||
| 466 | do_install:prepend:class-target() { | ||
| 467 | export CRATE_CC_NO_DEFAULTS=1 | ||
| 468 | } | ||
diff --git a/meta/classes-recipe/rust.bbclass b/meta/classes-recipe/rust.bbclass deleted file mode 100644 index e727601679..0000000000 --- a/meta/classes-recipe/rust.bbclass +++ /dev/null | |||
| @@ -1,51 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit rust-common | ||
| 8 | |||
| 9 | RUSTC = "rustc" | ||
| 10 | |||
| 11 | RUSTC_ARCHFLAGS += "--target=${RUST_HOST_SYS} ${RUSTFLAGS}" | ||
| 12 | |||
| 13 | def rust_base_dep(d): | ||
| 14 | # Taken from meta/classes/base.bbclass `base_dep_prepend` and modified to | ||
| 15 | # use rust instead of gcc | ||
| 16 | deps = "" | ||
| 17 | if not d.getVar('INHIBIT_DEFAULT_RUST_DEPS'): | ||
| 18 | if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')): | ||
| 19 | deps += " rust-native ${RUSTLIB_DEP}" | ||
| 20 | else: | ||
| 21 | deps += " rust-native" | ||
| 22 | return deps | ||
| 23 | |||
| 24 | DEPENDS:append = " ${@rust_base_dep(d)}" | ||
| 25 | |||
| 26 | # BUILD_LDFLAGS | ||
| 27 | # ${STAGING_LIBDIR_NATIVE} | ||
| 28 | # ${STAGING_BASE_LIBDIR_NATIVE} | ||
| 29 | # BUILDSDK_LDFLAGS | ||
| 30 | # ${STAGING_LIBDIR} | ||
| 31 | # ${STAGING_DIR_HOST} | ||
| 32 | # TARGET_LDFLAGS ????? | ||
| 33 | #RUSTC_BUILD_LDFLAGS = "\ | ||
| 34 | # --sysroot ${STAGING_DIR_NATIVE} \ | ||
| 35 | # -L${STAGING_LIBDIR_NATIVE} \ | ||
| 36 | # -L${STAGING_BASE_LIBDIR_NATIVE} \ | ||
| 37 | #" | ||
| 38 | |||
| 39 | # XXX: for some reason BitBake sets BUILD_* and TARGET_* but uses the bare | ||
| 40 | # variables for HOST. Alias them to make things easier for us. | ||
| 41 | HOST_LDFLAGS ?= "${LDFLAGS}" | ||
| 42 | HOST_CFLAGS ?= "${CFLAGS}" | ||
| 43 | HOST_CXXFLAGS ?= "${CXXFLAGS}" | ||
| 44 | HOST_CPPFLAGS ?= "${CPPFLAGS}" | ||
| 45 | |||
| 46 | rustlib_suffix = "${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${RUST_HOST_SYS}/lib" | ||
| 47 | # Native sysroot standard library path | ||
| 48 | rustlib_src = "${prefix}/lib/${rustlib_suffix}" | ||
| 49 | # Host sysroot standard library path | ||
| 50 | rustlib = "${libdir}/${rustlib_suffix}" | ||
| 51 | rustlib:class-native = "${libdir}/rustlib/${BUILD_SYS}/lib" | ||
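rust_base_dep above always pulls in rust-native and adds the target standard library dependency only when cross-compiling. A condensed standalone sketch of that decision, with hypothetical system triplets:

    def rust_base_dep(host_sys, build_sys, inhibit=False):
        # rust-native is always needed; the rustlib dependency only when
        # the host we compile for differs from the build machine.
        if inhibit:
            return []
        deps = ["rust-native"]
        if host_sys != build_sys:
            deps.append("${RUSTLIB_DEP}")
        return deps

    print(rust_base_dep("aarch64-poky-linux", "x86_64-linux"))
    # -> ['rust-native', '${RUSTLIB_DEP}']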
diff --git a/meta/classes-recipe/scons.bbclass b/meta/classes-recipe/scons.bbclass deleted file mode 100644 index 1cb375522d..0000000000 --- a/meta/classes-recipe/scons.bbclass +++ /dev/null | |||
| @@ -1,40 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit python3native | ||
| 8 | |||
| 9 | DEPENDS += "python3-scons-native" | ||
| 10 | |||
| 11 | EXTRA_OESCONS ?= "" | ||
| 12 | # The value below is derived from $(getconf ARG_MAX) | ||
| 13 | SCONS_MAXLINELENGTH ?= "MAXLINELENGTH=2097152" | ||
| 14 | EXTRA_OESCONS:append = " ${SCONS_MAXLINELENGTH}" | ||
| 15 | scons_do_configure() { | ||
| 16 | if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then | ||
| 17 | if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then | ||
| 18 | ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} | ||
| 19 | fi | ||
| 20 | |||
| 21 | mkdir -p `dirname ${CONFIGURESTAMPFILE}` | ||
| 22 | echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE} | ||
| 23 | fi | ||
| 24 | } | ||
| 25 | |||
| 26 | scons_do_compile() { | ||
| 27 | ${STAGING_BINDIR_NATIVE}/scons --directory=${S} ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \ | ||
| 28 | die "scons build execution failed." | ||
| 29 | } | ||
| 30 | |||
| 31 | scons_do_install() { | ||
| 32 | ${STAGING_BINDIR_NATIVE}/scons --directory=${S} install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \ | ||
| 33 | die "scons install execution failed." | ||
| 34 | } | ||
| 35 | |||
| 36 | do_configure[vardepsexclude] = "SCONS_MAXLINELENGTH" | ||
| 37 | do_compile[vardepsexclude] = "SCONS_MAXLINELENGTH" | ||
| 38 | do_install[vardepsexclude] = "SCONS_MAXLINELENGTH" | ||
| 39 | |||
| 40 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
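The stamp handling in scons_do_configure forces a `scons --clean` whenever a previously recorded task hash no longer matches the current one. A minimal sketch of that check (file names and hashes hypothetical):

    import os

    def needs_clean(stampfile, current_hash, cleanbroken=False):
        # Clean only when an old hash exists and differs, unless the recipe
        # opts out via CLEANBROKEN.
        if cleanbroken or not os.path.exists(stampfile):
            return False
        with open(stampfile) as f:
            return f.read().strip() != current_hash

    def record_hash(stampfile, current_hash):
        os.makedirs(os.path.dirname(stampfile) or ".", exist_ok=True)
        with open(stampfile, "w") as f:
            f.write(current_hash)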
diff --git a/meta/classes-recipe/setuptools3-base.bbclass b/meta/classes-recipe/setuptools3-base.bbclass deleted file mode 100644 index 190d9e6e3a..0000000000 --- a/meta/classes-recipe/setuptools3-base.bbclass +++ /dev/null | |||
| @@ -1,34 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | DEPENDS:append:class-target = " python3-native python3" | ||
| 8 | DEPENDS:append:class-nativesdk = " python3-native python3" | ||
| 9 | RDEPENDS:${PN}:append:class-target = " python3-core" | ||
| 10 | |||
| 11 | export STAGING_INCDIR | ||
| 12 | export STAGING_LIBDIR | ||
| 13 | |||
| 14 | # LDSHARED is the ld *command* used to create a shared library | ||
| 15 | export LDSHARED = "${CCLD} -shared" | ||
| 16 | # LDCXXSHARED is the ld *command* used to create a shared library of C++ | ||
| 17 | # objects | ||
| 18 | export LDCXXSHARED = "${CXX} -shared" | ||
| 19 | # CCSHARED are the C *flags* used to create objects to go into a shared | ||
| 20 | # library (module) | ||
| 21 | export CCSHARED = "-fPIC -DPIC" | ||
| 22 | # LINKFORSHARED are the flags passed to the $(CC) command that links | ||
| 23 | # the Python executable | ||
| 24 | export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic" | ||
| 25 | |||
| 26 | # The environment variable SETUPTOOLS_SCM_SUBPROCESS_TIMEOUT allows | ||
| 27 | # overriding the subprocess timeout. | ||
| 28 | export SETUPTOOLS_SCM_SUBPROCESS_TIMEOUT ??= "600" | ||
| 29 | |||
| 30 | FILES:${PN} += "${PYTHON_SITEPACKAGES_DIR}" | ||
| 31 | FILES:${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a" | ||
| 32 | FILES:${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la" | ||
| 33 | |||
| 34 | inherit python3native python3targetconfig | ||
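These exports matter because distutils consults the process environment (CC, LDSHARED, CFLAGS, ...) when it configures the compiler for extension modules, which is how the cross toolchain gets injected. A small sketch on a Unix host (distutils is used purely for illustration; it was removed from the stdlib in Python 3.12 but lives on vendored inside setuptools):

    import os
    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler

    os.environ["LDSHARED"] = "gcc -shared"   # in OE this is "${CCLD} -shared"
    cc = new_compiler()
    customize_compiler(cc)   # reads CC, LDSHARED, CFLAGS, ... from the environment
    print(cc.linker_so)      # -> ['gcc', '-shared']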
diff --git a/meta/classes-recipe/setuptools3.bbclass b/meta/classes-recipe/setuptools3.bbclass deleted file mode 100644 index 0bbe378023..0000000000 --- a/meta/classes-recipe/setuptools3.bbclass +++ /dev/null | |||
| @@ -1,58 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit setuptools3-base python_pep517 | ||
| 8 | |||
| 9 | DEPENDS += "python3-setuptools-native python3-wheel-native" | ||
| 10 | |||
| 11 | SETUPTOOLS_BUILD_ARGS ?= "" | ||
| 12 | |||
| 13 | SETUPTOOLS_SETUP_PATH ?= "${S}" | ||
| 14 | |||
| 15 | python do_check_backend() { | ||
| 16 | import re | ||
| 17 | filename = d.expand("${SETUPTOOLS_SETUP_PATH}/pyproject.toml") | ||
| 18 | if os.path.exists(filename): | ||
| 19 | for line in open(filename): | ||
| 20 | match = re.match(r"build-backend\s*=\s*\W([\w.]+)\W", line) | ||
| 21 | if not match: continue | ||
| 22 | |||
| 23 | msg = f"inherits setuptools3 but has pyproject.toml with {match[1]}, use the correct class" | ||
| 24 | if "pep517-backend" not in (d.getVar("INSANE_SKIP") or "").split(): | ||
| 25 | oe.qa.handle_error("pep517-backend", msg, d) | ||
| 26 | oe.qa.exit_if_errors(d) | ||
| 27 | } | ||
| 28 | addtask check_backend after do_patch before do_configure | ||
| 29 | |||
| 30 | setuptools3_do_configure() { | ||
| 31 | : | ||
| 32 | } | ||
| 33 | # This isn't nice, but it is the best solution to ensure clean builds for now. | ||
| 34 | # https://github.com/pypa/setuptools/issues/4732 | ||
| 35 | do_configure[cleandirs] = "${SETUPTOOLS_SETUP_PATH}/build" | ||
| 36 | |||
| 37 | setuptools3_do_compile() { | ||
| 38 | cd ${SETUPTOOLS_SETUP_PATH} | ||
| 39 | |||
| 40 | export STAGING_INCDIR=${STAGING_INCDIR} | ||
| 41 | export STAGING_LIBDIR=${STAGING_LIBDIR} | ||
| 42 | |||
| 43 | nativepython3 setup.py --verbose \ | ||
| 44 | build ${@oe.utils.parallel_make_argument(d, "-j %d")} \ | ||
| 45 | bdist_wheel --dist-dir ${PEP517_WHEEL_PATH} \ | ||
| 46 | ${SETUPTOOLS_BUILD_ARGS} | ||
| 47 | } | ||
| 48 | setuptools3_do_compile[vardepsexclude] = "MACHINE" | ||
| 49 | do_compile[cleandirs] += "${PEP517_WHEEL_PATH}" | ||
| 50 | |||
| 51 | # This could be removed in the future but some recipes in meta-oe still use it | ||
| 52 | setuptools3_do_install() { | ||
| 53 | python_pep517_do_install | ||
| 54 | } | ||
| 55 | |||
| 56 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
| 57 | |||
| 58 | export LDSHARED = "${CCLD} -shared" | ||
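A standalone run of the backend check above, showing the regular expression picking the declared backend out of a pyproject.toml line (the sample is hypothetical):

    import re

    sample = 'build-backend = "flit_core.buildapi"'
    match = re.match(r"build-backend\s*=\s*\W([\w.]+)\W", sample)
    if match:
        # A recipe inheriting setuptools3 with this backend would trip the
        # pep517-backend QA check.
        print("declared backend:", match[1])   # -> flit_core.buildapi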
diff --git a/meta/classes-recipe/setuptools3_legacy.bbclass b/meta/classes-recipe/setuptools3_legacy.bbclass deleted file mode 100644 index 6b51b9796b..0000000000 --- a/meta/classes-recipe/setuptools3_legacy.bbclass +++ /dev/null | |||
| @@ -1,98 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class is for packages which use the deprecated setuptools behaviour, | ||
| 8 | # specifically custom install tasks which don't work correctly with bdist_wheel. | ||
| 9 | # This behaviour is deprecated in setuptools[1] and won't work in the future, so | ||
| 10 | # all users of this should consider their options: pure Python modules can use a | ||
| 11 | # modern Python tool such as build[2], or packages which are doing more (such as | ||
| 12 | # installing init scripts) should use a fully-featured build system such as Meson. | ||
| 13 | # | ||
| 14 | # [1] https://setuptools.pypa.io/en/latest/history.html#id142 | ||
| 15 | # [2] https://pypi.org/project/build/ | ||
| 16 | |||
| 17 | inherit setuptools3-base | ||
| 18 | |||
| 19 | B = "${WORKDIR}/build" | ||
| 20 | do_configure[cleandirs] = "${B}" | ||
| 21 | |||
| 22 | SETUPTOOLS_BUILD_ARGS ?= "" | ||
| 23 | SETUPTOOLS_INSTALL_ARGS ?= "--root=${D} \ | ||
| 24 | --prefix=${prefix} \ | ||
| 25 | --install-lib=${PYTHON_SITEPACKAGES_DIR} \ | ||
| 26 | --install-data=${datadir}" | ||
| 27 | |||
| 28 | SETUPTOOLS_PYTHON = "python3" | ||
| 29 | SETUPTOOLS_PYTHON:class-native = "nativepython3" | ||
| 30 | |||
| 31 | SETUPTOOLS_SETUP_PATH ?= "${S}" | ||
| 32 | |||
| 33 | python do_check_backend() { | ||
| 34 | import re | ||
| 35 | filename = d.expand("${SETUPTOOLS_SETUP_PATH}/pyproject.toml") | ||
| 36 | if os.path.exists(filename): | ||
| 37 | for line in open(filename): | ||
| 38 | match = re.match(r"build-backend\s*=\s*\W([\w.]+)\W", line) | ||
| 39 | if not match: continue | ||
| 40 | |||
| 41 | msg = f"inherits setuptools3_legacy but has pyproject.toml with {match[1]}, use the correct class" | ||
| 42 | if "pep517-backend" not in (d.getVar("INSANE_SKIP") or "").split(): | ||
| 43 | oe.qa.handle_error("pep517-backend", msg, d) | ||
| 44 | } | ||
| 45 | addtask check_backend after do_patch before do_configure | ||
| 46 | |||
| 47 | setuptools3_legacy_do_configure() { | ||
| 48 | : | ||
| 49 | } | ||
| 50 | |||
| 51 | setuptools3_legacy_do_compile() { | ||
| 52 | cd ${SETUPTOOLS_SETUP_PATH} | ||
| 53 | STAGING_INCDIR=${STAGING_INCDIR} \ | ||
| 54 | STAGING_LIBDIR=${STAGING_LIBDIR} \ | ||
| 55 | ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \ | ||
| 56 | build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \ | ||
| 57 | bbfatal_log "'python3 setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed." | ||
| 58 | } | ||
| 59 | setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE" | ||
| 60 | |||
| 61 | setuptools3_legacy_do_install() { | ||
| 62 | cd ${SETUPTOOLS_SETUP_PATH} | ||
| 63 | install -d ${D}${PYTHON_SITEPACKAGES_DIR} | ||
| 64 | STAGING_INCDIR=${STAGING_INCDIR} \ | ||
| 65 | STAGING_LIBDIR=${STAGING_LIBDIR} \ | ||
| 66 | PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR}:$PYTHONPATH \ | ||
| 67 | ${STAGING_BINDIR_NATIVE}/python3-native/python3 setup.py \ | ||
| 68 | build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \ | ||
| 69 | bbfatal_log "'python3 setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed." | ||
| 70 | |||
| 71 | # support filenames with *spaces* | ||
| 72 | find ${D} -name "*.py" -exec grep -q ${D} {} \; \ | ||
| 73 | -exec sed -i -e s:${D}::g {} \; | ||
| 74 | |||
| 75 | for i in ${D}${bindir}/* ${D}${sbindir}/*; do | ||
| 76 | if [ -f "$i" ]; then | ||
| 77 | sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${SETUPTOOLS_PYTHON}:g $i | ||
| 78 | sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i | ||
| 79 | fi | ||
| 80 | done | ||
| 81 | |||
| 82 | rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth | ||
| 83 | |||
| 84 | # | ||
| 85 | # FIXME: Bandaid against wrong datadir computation | ||
| 86 | # | ||
| 87 | if [ -e ${D}${datadir}/share ]; then | ||
| 88 | mv -f ${D}${datadir}/share/* ${D}${datadir}/ | ||
| 89 | rmdir ${D}${datadir}/share | ||
| 90 | fi | ||
| 91 | } | ||
| 92 | setuptools3_legacy_do_install[vardepsexclude] = "MACHINE" | ||
| 93 | |||
| 94 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
| 95 | |||
| 96 | export LDSHARED = "${CCLD} -shared" | ||
| 97 | DEPENDS += "python3-setuptools-native" | ||
| 98 | |||
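The sed loop in setuptools3_legacy_do_install rewrites installed scripts so that the native interpreter shebang becomes a target-side `env` invocation and staged native bindir paths become the target ${bindir}. A condensed sketch of the same rewrite in Python, with hypothetical paths:

    def rewrite_script(text, native_python, native_bindir,
                       python="python3", bindir="/usr/bin"):
        # Replace the build-host interpreter with the target "env" form...
        text = text.replace(native_python, "/usr/bin/env " + python)
        # ...and any remaining staged native paths with the target bindir.
        return text.replace(native_bindir, bindir)

    script = "#!/sysroot-native/usr/bin/python3-native/python3\nprint('hi')\n"
    print(rewrite_script(script,
                         "/sysroot-native/usr/bin/python3-native/python3",
                         "/sysroot-native/usr/bin"))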
diff --git a/meta/classes-recipe/siteinfo.bbclass b/meta/classes-recipe/siteinfo.bbclass deleted file mode 100644 index 25b53d929a..0000000000 --- a/meta/classes-recipe/siteinfo.bbclass +++ /dev/null | |||
| @@ -1,226 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class exists to provide information about the targets that | ||
| 8 | # may be needed by other classes and/or recipes. If you add a new | ||
| 9 | # target this will probably need to be updated. | ||
| 10 | |||
| 11 | # | ||
| 12 | # Returns the site information for the named target "<arch>-<os>" as a | ||
| 13 | # list of tags built from the tables below | ||
| 14 | # | ||
| 15 | # The tags include | ||
| 16 | # * the target name itself ("<arch>-<os>") | ||
| 17 | # * endianness: "endian-big" for big endian targets, "endian-little" for little endian | ||
| 18 | # * bit size: "bit-32" or "bit-64" | ||
| 19 | # * the C library family, e.g. "common-glibc" or "common-musl" | ||
| 20 | # | ||
| 21 | # Targets with an unknown endianness or bit size are a fatal error, so | ||
| 22 | # new architectures will probably need an update here | ||
| 23 | # | ||
| 24 | def siteinfo_data_for_machine(arch, os, d): | ||
| 25 | archinfo = { | ||
| 26 | "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch | ||
| 27 | "aarch64": "endian-little bit-64 arm-common arm-64", | ||
| 28 | "aarch64_be": "endian-big bit-64 arm-common arm-64", | ||
| 29 | "arc": "endian-little bit-32 arc-common", | ||
| 30 | "arceb": "endian-big bit-32 arc-common", | ||
| 31 | "arm": "endian-little bit-32 arm-common arm-32", | ||
| 32 | "armeb": "endian-big bit-32 arm-common arm-32", | ||
| 33 | "avr32": "endian-big bit-32 avr32-common", | ||
| 34 | "bfin": "endian-little bit-32 bfin-common", | ||
| 35 | "epiphany": "endian-little bit-32", | ||
| 36 | "i386": "endian-little bit-32 ix86-common", | ||
| 37 | "i486": "endian-little bit-32 ix86-common", | ||
| 38 | "i586": "endian-little bit-32 ix86-common", | ||
| 39 | "i686": "endian-little bit-32 ix86-common", | ||
| 40 | "ia64": "endian-little bit-64", | ||
| 41 | "lm32": "endian-big bit-32", | ||
| 42 | "loongarch32": "endian-little bit-32 loongarch", | ||
| 43 | "loongarch64": "endian-little bit-64 loongarch", | ||
| 44 | "m68k": "endian-big bit-32", | ||
| 45 | "microblaze": "endian-big bit-32 microblaze-common", | ||
| 46 | "microblazeel": "endian-little bit-32 microblaze-common", | ||
| 47 | "mips": "endian-big bit-32 mips-common", | ||
| 48 | "mips64": "endian-big bit-64 mips-common", | ||
| 49 | "mips64el": "endian-little bit-64 mips-common", | ||
| 50 | "mipsisa64r6": "endian-big bit-64 mips-common", | ||
| 51 | "mipsisa64r6el": "endian-little bit-64 mips-common", | ||
| 52 | "mipsel": "endian-little bit-32 mips-common", | ||
| 53 | "mipsisa32r6": "endian-big bit-32 mips-common", | ||
| 54 | "mipsisa32r6el": "endian-little bit-32 mips-common", | ||
| 55 | "powerpc": "endian-big bit-32 powerpc-common", | ||
| 56 | "powerpcle": "endian-little bit-32 powerpc-common", | ||
| 57 | "nios2": "endian-little bit-32 nios2-common", | ||
| 58 | "powerpc64": "endian-big bit-64 powerpc-common", | ||
| 59 | "powerpc64le": "endian-little bit-64 powerpc-common", | ||
| 60 | "ppc": "endian-big bit-32 powerpc-common", | ||
| 61 | "ppc64": "endian-big bit-64 powerpc-common", | ||
| 62 | "ppc64le" : "endian-little bit-64 powerpc-common", | ||
| 63 | "riscv32": "endian-little bit-32 riscv-common", | ||
| 64 | "riscv64": "endian-little bit-64 riscv-common", | ||
| 65 | "sh3": "endian-little bit-32 sh-common", | ||
| 66 | "sh3eb": "endian-big bit-32 sh-common", | ||
| 67 | "sh4": "endian-little bit-32 sh-common", | ||
| 68 | "sh4eb": "endian-big bit-32 sh-common", | ||
| 69 | "sparc": "endian-big bit-32", | ||
| 70 | "viac3": "endian-little bit-32 ix86-common", | ||
| 71 | "x86_64": "endian-little", # bitinfo specified in targetinfo | ||
| 72 | } | ||
| 73 | osinfo = { | ||
| 74 | "darwin": "common-darwin", | ||
| 75 | "darwin9": "common-darwin", | ||
| 76 | "darwin19": "common-darwin", | ||
| 77 | "darwin21": "common-darwin", | ||
| 78 | "linux": "common-linux common-glibc", | ||
| 79 | "linux-gnu": "common-linux common-glibc", | ||
| 80 | "linux-gnu_ilp32": "common-linux common-glibc", | ||
| 81 | "linux-gnux32": "common-linux common-glibc", | ||
| 82 | "linux-gnun32": "common-linux common-glibc", | ||
| 83 | "linux-gnueabi": "common-linux common-glibc", | ||
| 84 | "linux-gnuspe": "common-linux common-glibc", | ||
| 85 | "linux-musl": "common-linux common-musl", | ||
| 86 | "linux-muslx32": "common-linux common-musl", | ||
| 87 | "linux-musleabi": "common-linux common-musl", | ||
| 88 | "linux-muslspe": "common-linux common-musl", | ||
| 89 | "uclinux-uclibc": "common-uclibc", | ||
| 90 | "cygwin": "common-cygwin", | ||
| 91 | "mingw32": "common-mingw", | ||
| 92 | } | ||
| 93 | targetinfo = { | ||
| 94 | "aarch64-linux-gnu": "aarch64-linux", | ||
| 95 | "aarch64_be-linux-gnu": "aarch64_be-linux", | ||
| 96 | "aarch64-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32", | ||
| 97 | "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32", | ||
| 98 | "aarch64-linux-musl": "aarch64-linux", | ||
| 99 | "aarch64_be-linux-musl": "aarch64_be-linux", | ||
| 100 | "arm-linux-gnueabi": "arm-linux", | ||
| 101 | "arm-linux-musleabi": "arm-linux", | ||
| 102 | "armeb-linux-gnueabi": "armeb-linux", | ||
| 103 | "armeb-linux-musleabi": "armeb-linux", | ||
| 104 | "loongarch32-linux": "loongarch32-linux", | ||
| 105 | "loongarch64-linux": "loongarch64-linux", | ||
| 106 | "microblazeel-linux" : "microblaze-linux", | ||
| 107 | "microblazeel-linux-musl" : "microblaze-linux", | ||
| 108 | "mips-linux-musl": "mips-linux", | ||
| 109 | "mipsel-linux-musl": "mipsel-linux", | ||
| 110 | "mips64-linux-musl": "mips64-linux", | ||
| 111 | "mips64el-linux-musl": "mips64el-linux", | ||
| 112 | "mips64-linux-gnun32": "mips-linux bit-32", | ||
| 113 | "mips64el-linux-gnun32": "mipsel-linux bit-32", | ||
| 114 | "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32", | ||
| 115 | "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32", | ||
| 116 | "powerpc-linux": "powerpc32-linux powerpc32-linux-glibc", | ||
| 117 | "powerpc-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl", | ||
| 118 | "powerpcle-linux": "powerpc32-linux powerpc32-linux-glibc", | ||
| 119 | "powerpcle-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl", | ||
| 120 | "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux powerpc32-linux-glibc", | ||
| 121 | "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux powerpc32-linux-musl", | ||
| 122 | "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux powerpc64-linux-glibc", | ||
| 123 | "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux powerpc64-linux-musl", | ||
| 124 | "powerpc64-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc", | ||
| 125 | "powerpc64-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl", | ||
| 126 | "powerpc64le-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc", | ||
| 127 | "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl", | ||
| 128 | "riscv32-linux": "riscv32-linux", | ||
| 129 | "riscv32-linux-musl": "riscv32-linux", | ||
| 130 | "riscv64-linux": "riscv64-linux", | ||
| 131 | "riscv64-linux-musl": "riscv64-linux", | ||
| 132 | "x86_64-cygwin": "bit-64", | ||
| 133 | "x86_64-darwin": "bit-64", | ||
| 134 | "x86_64-darwin9": "bit-64", | ||
| 135 | "x86_64-darwin19": "bit-64", | ||
| 136 | "x86_64-darwin21": "bit-64", | ||
| 137 | "x86_64-linux": "bit-64", | ||
| 138 | "x86_64-linux-musl": "x86_64-linux bit-64", | ||
| 139 | "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux", | ||
| 140 | "x86_64-elf": "bit-64", | ||
| 141 | "x86_64-linux-gnu": "bit-64 x86_64-linux", | ||
| 142 | "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux", | ||
| 143 | "x86_64-mingw32": "bit-64", | ||
| 144 | } | ||
| 145 | |||
| 146 | # Add in any extra user-supplied data, which may come from a BSP layer, removing the | ||
| 147 | # need to always change this class directly | ||
| 148 | extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split() | ||
| 149 | for m in extra_siteinfo: | ||
| 150 | call = m + "(archinfo, osinfo, targetinfo, d)" | ||
| 151 | locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d} | ||
| 152 | archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs) | ||
| 153 | |||
| 154 | target = "%s-%s" % (arch, os) | ||
| 155 | |||
| 156 | sitedata = [] | ||
| 157 | if arch in archinfo: | ||
| 158 | sitedata.extend(archinfo[arch].split()) | ||
| 159 | if os in osinfo: | ||
| 160 | sitedata.extend(osinfo[os].split()) | ||
| 161 | if target in targetinfo: | ||
| 162 | sitedata.extend(targetinfo[target].split()) | ||
| 163 | sitedata.append(target) | ||
| 164 | sitedata.append("common") | ||
| 165 | |||
| 166 | bb.debug(1, "SITE files %s" % sitedata) | ||
| 167 | return sitedata | ||
| 168 | |||
| 169 | def siteinfo_data(d): | ||
| 170 | return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d) | ||
| 171 | |||
| 172 | python () { | ||
| 173 | sitedata = set(siteinfo_data(d)) | ||
| 174 | if "endian-little" in sitedata: | ||
| 175 | d.setVar("SITEINFO_ENDIANNESS", "le") | ||
| 176 | elif "endian-big" in sitedata: | ||
| 177 | d.setVar("SITEINFO_ENDIANNESS", "be") | ||
| 178 | else: | ||
| 179 | bb.error("Unable to determine endianness for architecture '%s'" % | ||
| 180 | d.getVar("HOST_ARCH")) | ||
| 181 | bb.fatal("Please add your architecture to siteinfo.bbclass") | ||
| 182 | |||
| 183 | if "bit-32" in sitedata: | ||
| 184 | d.setVar("SITEINFO_BITS", "32") | ||
| 185 | elif "bit-64" in sitedata: | ||
| 186 | d.setVar("SITEINFO_BITS", "64") | ||
| 187 | else: | ||
| 188 | bb.error("Unable to determine bit size for architecture '%s'" % | ||
| 189 | d.getVar("HOST_ARCH")) | ||
| 190 | bb.fatal("Please add your architecture to siteinfo.bbclass") | ||
| 191 | } | ||
| 192 | |||
| 193 | # Layers with siteconfig need to add a replacement path to this variable so the | ||
| 194 | # sstate isn't path specific | ||
| 195 | SITEINFO_PATHVARS = "COREBASE" | ||
| 196 | |||
| 197 | def siteinfo_get_files(d, sysrootcache=False): | ||
| 198 | sitedata = siteinfo_data(d) | ||
| 199 | sitefiles = [] | ||
| 200 | searched = [] | ||
| 201 | for path in d.getVar("BBPATH").split(":"): | ||
| 202 | for element in sitedata: | ||
| 203 | filename = os.path.join(path, "site", element) | ||
| 204 | if os.path.exists(filename): | ||
| 205 | searched.append(filename + ":True") | ||
| 206 | sitefiles.append(filename) | ||
| 207 | else: | ||
| 208 | searched.append(filename + ":False") | ||
| 209 | |||
| 210 | # Have to parameterise out hardcoded paths such as COREBASE for the main site files | ||
| 211 | for var in d.getVar("SITEINFO_PATHVARS").split(): | ||
| 212 | searched2 = [] | ||
| 213 | replace = os.path.normpath(d.getVar(var)) | ||
| 214 | for s in searched: | ||
| 215 | searched2.append(s.replace(replace, "${" + var + "}")) | ||
| 216 | searched = searched2 | ||
| 217 | |||
| 218 | if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d): | ||
| 219 | # We need sstate sigs for native/cross not to vary upon arch so we can't depend on the site files. | ||
| 220 | # In future we may want to depend upon all site files? | ||
| 221 | # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example | ||
| 222 | searched = [] | ||
| 223 | |||
| 224 | return sitefiles, searched | ||
| 225 | |||
| 226 | |||
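A condensed, self-contained sketch of the lookup siteinfo_data_for_machine performs, using a small slice of the tables above:

    archinfo = {"x86_64": "endian-little"}
    osinfo = {"linux-musl": "common-linux common-musl"}
    targetinfo = {"x86_64-linux-musl": "x86_64-linux bit-64"}

    def sitedata_for(arch, target_os):
        target = "%s-%s" % (arch, target_os)
        data = []
        for table, key in ((archinfo, arch), (osinfo, target_os), (targetinfo, target)):
            data.extend(table.get(key, "").split())
        return data + [target, "common"]

    print(sitedata_for("x86_64", "linux-musl"))
    # -> ['endian-little', 'common-linux', 'common-musl',
    #     'x86_64-linux', 'bit-64', 'x86_64-linux-musl', 'common']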
diff --git a/meta/classes-recipe/sourceforge-releases.bbclass b/meta/classes-recipe/sourceforge-releases.bbclass deleted file mode 100644 index 0b5e5d0711..0000000000 --- a/meta/classes-recipe/sourceforge-releases.bbclass +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | SOURCEFORGE_PROJECT ?= "${BPN}" | ||
| 2 | UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/${SOURCEFORGE_PROJECT}/files/" | ||
diff --git a/meta/classes-recipe/syslinux.bbclass b/meta/classes-recipe/syslinux.bbclass deleted file mode 100644 index be3b898b4d..0000000000 --- a/meta/classes-recipe/syslinux.bbclass +++ /dev/null | |||
| @@ -1,194 +0,0 @@ | |||
| 1 | # syslinux.bbclass | ||
| 2 | # Copyright (C) 2004-2006, Advanced Micro Devices, Inc. | ||
| 3 | # SPDX-License-Identifier: MIT | ||
| 4 | |||
| 5 | # Provide syslinux specific functions for building bootable images. | ||
| 6 | |||
| 7 | # External variables | ||
| 8 | # ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional) | ||
| 9 | # ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) | ||
| 10 | # ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu | ||
| 11 | # ${LABELS} - a list of targets for the automatic config | ||
| 12 | # ${APPEND} - an override list of append strings for each label | ||
| 13 | # ${SYSLINUX_OPTS} - additional options to add to the syslinux file, ';'-delimited | ||
| 14 | # ${SYSLINUX_SPLASH} - a background for the VGA boot menu, if using the boot menu | ||
| 15 | # ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console | ||
| 16 | # ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string | ||
| 17 | # ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument | ||
| 18 | # ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments | ||
| 19 | |||
| 20 | do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \ | ||
| 21 | syslinux-native:do_populate_sysroot" | ||
| 22 | |||
| 23 | ISOLINUXDIR ?= "/isolinux" | ||
| 24 | SYSLINUXDIR = "/" | ||
| 25 | # The kernel has an internal default console, which you can override with | ||
| 26 | # a console=...some_tty... | ||
| 27 | SYSLINUX_DEFAULT_CONSOLE ?= "" | ||
| 28 | SYSLINUX_SERIAL ?= "0 115200" | ||
| 29 | SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200" | ||
| 30 | SYSLINUX_PROMPT ?= "0" | ||
| 31 | SYSLINUX_TIMEOUT ?= "50" | ||
| 32 | AUTO_SYSLINUXMENU ?= "1" | ||
| 33 | SYSLINUX_ALLOWOPTIONS ?= "1" | ||
| 34 | SYSLINUX_ROOT ?= "${ROOT}" | ||
| 35 | SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg" | ||
| 36 | SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg" | ||
| 37 | APPEND ?= "" | ||
| 38 | |||
| 39 | # Need UUID utility code. | ||
| 40 | inherit fs-uuid | ||
| 41 | |||
| 42 | syslinux_populate() { | ||
| 43 | DEST=$1 | ||
| 44 | BOOTDIR=$2 | ||
| 45 | CFGNAME=$3 | ||
| 46 | |||
| 47 | install -d ${DEST}${BOOTDIR} | ||
| 48 | |||
| 49 | # Install the config files | ||
| 50 | install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME} | ||
| 51 | if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then | ||
| 52 | install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32 | ||
| 53 | install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32 | ||
| 54 | install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32 | ||
| 55 | if [ "${SYSLINUX_SPLASH}" != "" ] ; then | ||
| 56 | install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss | ||
| 57 | fi | ||
| 58 | fi | ||
| 59 | } | ||
| 60 | |||
| 61 | syslinux_iso_populate() { | ||
| 62 | iso_dir=$1 | ||
| 63 | syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg | ||
| 64 | install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR} | ||
| 65 | install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR} | ||
| 66 | } | ||
| 67 | |||
| 68 | syslinux_hddimg_populate() { | ||
| 69 | hdd_dir=$1 | ||
| 70 | syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg | ||
| 71 | install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys | ||
| 72 | } | ||
| 73 | |||
| 74 | syslinux_hddimg_install() { | ||
| 75 | syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg | ||
| 76 | } | ||
| 77 | |||
| 78 | python build_syslinux_cfg () { | ||
| 79 | import copy | ||
| 80 | import sys | ||
| 81 | |||
| 82 | workdir = d.getVar('WORKDIR') | ||
| 83 | if not workdir: | ||
| 84 | bb.error("WORKDIR not defined, unable to package") | ||
| 85 | return | ||
| 86 | |||
| 87 | labels = d.getVar('LABELS') | ||
| 88 | if not labels: | ||
| 89 | bb.debug(1, "LABELS not defined, nothing to do") | ||
| 90 | return | ||
| 91 | |||
| 92 | if labels == []: | ||
| 93 | bb.debug(1, "No labels, nothing to do") | ||
| 94 | return | ||
| 95 | |||
| 96 | cfile = d.getVar('SYSLINUX_CFG') | ||
| 97 | if not cfile: | ||
| 98 | bb.fatal('Unable to read SYSLINUX_CFG') | ||
| 99 | |||
| 100 | try: | ||
| 101 | cfgfile = open(cfile, 'w') | ||
| 102 | except OSError: | ||
| 103 | bb.fatal('Unable to open %s' % cfile) | ||
| 104 | |||
| 105 | cfgfile.write('# Automatically created by OE\n') | ||
| 106 | |||
| 107 | opts = d.getVar('SYSLINUX_OPTS') | ||
| 108 | |||
| 109 | if opts: | ||
| 110 | for opt in opts.split(';'): | ||
| 111 | cfgfile.write('%s\n' % opt) | ||
| 112 | |||
| 113 | allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS') | ||
| 114 | if allowoptions: | ||
| 115 | cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions) | ||
| 116 | else: | ||
| 117 | cfgfile.write('ALLOWOPTIONS 1\n') | ||
| 118 | |||
| 119 | syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE') | ||
| 120 | syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY') | ||
| 121 | syslinux_serial = d.getVar('SYSLINUX_SERIAL') | ||
| 122 | if syslinux_serial: | ||
| 123 | cfgfile.write('SERIAL %s\n' % syslinux_serial) | ||
| 124 | |||
| 125 | menu = (d.getVar('AUTO_SYSLINUXMENU') == "1") | ||
| 126 | |||
| 127 | if menu and syslinux_serial: | ||
| 128 | cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0])) | ||
| 129 | else: | ||
| 130 | cfgfile.write('DEFAULT %s\n' % (labels.split()[0])) | ||
| 131 | |||
| 132 | timeout = d.getVar('SYSLINUX_TIMEOUT') | ||
| 133 | |||
| 134 | if timeout: | ||
| 135 | cfgfile.write('TIMEOUT %s\n' % timeout) | ||
| 136 | else: | ||
| 137 | cfgfile.write('TIMEOUT 50\n') | ||
| 138 | |||
| 139 | prompt = d.getVar('SYSLINUX_PROMPT') | ||
| 140 | if prompt: | ||
| 141 | cfgfile.write('PROMPT %s\n' % prompt) | ||
| 142 | else: | ||
| 143 | cfgfile.write('PROMPT 1\n') | ||
| 144 | |||
| 145 | if menu: | ||
| 146 | cfgfile.write('ui vesamenu.c32\n') | ||
| 147 | cfgfile.write('menu title Select kernel options and boot kernel\n') | ||
| 148 | cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n') | ||
| 149 | splash = d.getVar('SYSLINUX_SPLASH') | ||
| 150 | if splash: | ||
| 151 | cfgfile.write('menu background splash.lss\n') | ||
| 152 | |||
| 153 | for label in labels.split(): | ||
| 154 | localdata = bb.data.createCopy(d) | ||
| 155 | |||
| 156 | overrides = localdata.getVar('OVERRIDES') | ||
| 157 | if not overrides: | ||
| 158 | bb.fatal('OVERRIDES not defined') | ||
| 159 | |||
| 160 | localdata.setVar('OVERRIDES', label + ':' + overrides) | ||
| 161 | |||
| 162 | btypes = [ [ "", syslinux_default_console ] ] | ||
| 163 | if menu and syslinux_serial: | ||
| 164 | btypes = [ [ "Graphics console ", syslinux_default_console ], | ||
| 165 | [ "Serial console ", syslinux_serial_tty ] ] | ||
| 166 | |||
| 167 | root = d.getVar('SYSLINUX_ROOT') | ||
| 168 | if not root: | ||
| 169 | bb.fatal('SYSLINUX_ROOT not defined') | ||
| 170 | |||
| 171 | kernel = localdata.getVar('KERNEL_IMAGETYPE') | ||
| 172 | for btype in btypes: | ||
| 173 | cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel)) | ||
| 174 | |||
| 175 | exargs = d.getVar('SYSLINUX_KERNEL_ARGS') | ||
| 176 | if exargs: | ||
| 177 | btype[1] += " " + exargs | ||
| 178 | |||
| 179 | append = localdata.getVar('APPEND') | ||
| 180 | initrd = localdata.getVar('INITRD') | ||
| 181 | |||
| 182 | append = root + " " + append | ||
| 183 | cfgfile.write('APPEND ') | ||
| 184 | |||
| 185 | if initrd: | ||
| 186 | cfgfile.write('initrd=/initrd ') | ||
| 187 | |||
| 188 | cfgfile.write('LABEL=%s ' % label) | ||
| 189 | append = replace_rootfs_uuid(d, append) | ||
| 190 | cfgfile.write('%s %s\n' % (append, btype[1])) | ||
| 191 | |||
| 192 | cfgfile.close() | ||
| 193 | } | ||
| 194 | build_syslinux_cfg[dirs] = "${S}" | ||
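For reference, a condensed sketch of the configuration build_syslinux_cfg emits for a hypothetical single label with no serial console and no menu (all values illustrative):

    labels = ["boot"]
    kernel = "bzImage"              # ${KERNEL_IMAGETYPE}
    root = "root=/dev/sda2"         # ${SYSLINUX_ROOT}
    append = "quiet"                # ${APPEND}

    with open("syslinux.cfg", "w") as cfg:
        cfg.write("# Automatically created by OE\n")
        cfg.write("ALLOWOPTIONS 1\n")
        cfg.write("DEFAULT %s\n" % labels[0])
        cfg.write("TIMEOUT 50\n")
        cfg.write("PROMPT 0\n")
        for label in labels:
            cfg.write("LABEL %s\nKERNEL /%s\n" % (label, kernel))
            cfg.write("APPEND LABEL=%s %s %s\n" % (label, root, append))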
diff --git a/meta/classes-recipe/systemd-boot-cfg.bbclass b/meta/classes-recipe/systemd-boot-cfg.bbclass deleted file mode 100644 index 12da41ebad..0000000000 --- a/meta/classes-recipe/systemd-boot-cfg.bbclass +++ /dev/null | |||
| @@ -1,77 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | SYSTEMD_BOOT_CFG ?= "${S}/loader.conf" | ||
| 8 | SYSTEMD_BOOT_ENTRIES ?= "" | ||
| 9 | SYSTEMD_BOOT_TIMEOUT ?= "10" | ||
| 10 | |||
| 11 | # Uses MACHINE specific KERNEL_IMAGETYPE | ||
| 12 | PACKAGE_ARCH = "${MACHINE_ARCH}" | ||
| 13 | |||
| 14 | # Need UUID utility code. | ||
| 15 | inherit fs-uuid | ||
| 16 | |||
| 17 | python build_efi_cfg() { | ||
| 18 | s = d.getVar("S") | ||
| 19 | labels = d.getVar('LABELS') | ||
| 20 | if not labels: | ||
| 21 | bb.debug(1, "LABELS not defined, nothing to do") | ||
| 22 | return | ||
| 23 | |||
| 24 | if labels == []: | ||
| 25 | bb.debug(1, "No labels, nothing to do") | ||
| 26 | return | ||
| 27 | |||
| 28 | cfile = d.getVar('SYSTEMD_BOOT_CFG') | ||
| 29 | cdir = os.path.dirname(cfile) | ||
| 30 | if not os.path.exists(cdir): | ||
| 31 | os.makedirs(cdir) | ||
| 32 | try: | ||
| 33 | cfgfile = open(cfile, 'w') | ||
| 34 | except OSError: | ||
| 35 | bb.fatal('Unable to open %s' % cfile) | ||
| 36 | |||
| 37 | cfgfile.write('# Automatically created by OE\n') | ||
| 38 | cfgfile.write('default %s.conf\n' % (labels.split()[0])) | ||
| 39 | timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT') | ||
| 40 | if timeout: | ||
| 41 | cfgfile.write('timeout %s\n' % timeout) | ||
| 42 | else: | ||
| 43 | cfgfile.write('timeout 10\n') | ||
| 44 | cfgfile.close() | ||
| 45 | |||
| 46 | for label in labels.split(): | ||
| 47 | localdata = d.createCopy() | ||
| 48 | |||
| 49 | entryfile = "%s/%s.conf" % (s, label) | ||
| 50 | if not os.path.exists(s): | ||
| 51 | os.makedirs(s) | ||
| 52 | d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile) | ||
| 53 | try: | ||
| 54 | entrycfg = open(entryfile, "w") | ||
| 55 | except OSError: | ||
| 56 | bb.fatal('Unable to open %s' % entryfile) | ||
| 57 | |||
| 58 | entrycfg.write('title %s\n' % label) | ||
| 59 | |||
| 60 | kernel = localdata.getVar("KERNEL_IMAGETYPE") | ||
| 61 | entrycfg.write('linux /%s\n' % kernel) | ||
| 62 | |||
| 63 | append = localdata.getVar('APPEND') | ||
| 64 | initrd = localdata.getVar('INITRD') | ||
| 65 | |||
| 66 | if initrd: | ||
| 67 | entrycfg.write('initrd /initrd\n') | ||
| 68 | lb = label | ||
| 69 | if label == "install": | ||
| 70 | lb = "install-efi" | ||
| 71 | entrycfg.write('options LABEL=%s ' % lb) | ||
| 72 | if append: | ||
| 73 | append = replace_rootfs_uuid(d, append) | ||
| 74 | entrycfg.write('%s' % append) | ||
| 75 | entrycfg.write('\n') | ||
| 76 | entrycfg.close() | ||
| 77 | } | ||
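Similarly, a sketch of the files build_efi_cfg produces for a hypothetical single label "boot": one loader.conf plus one entry file per label (kernel and options illustrative):

    files = {
        "loader.conf": ("# Automatically created by OE\n"
                        "default boot.conf\n"
                        "timeout 10\n"),
        "boot.conf": ("title boot\n"
                      "linux /bzImage\n"
                      "options LABEL=boot root=/dev/sda2 quiet\n"),
    }
    for name, text in files.items():
        with open(name, "w") as f:
            f.write(text)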
diff --git a/meta/classes-recipe/systemd-boot.bbclass b/meta/classes-recipe/systemd-boot.bbclass deleted file mode 100644 index 5aa32dd997..0000000000 --- a/meta/classes-recipe/systemd-boot.bbclass +++ /dev/null | |||
| @@ -1,35 +0,0 @@ | |||
| 1 | # Copyright (C) 2016 Intel Corporation | ||
| 2 | # | ||
| 3 | # SPDX-License-Identifier: MIT | ||
| 4 | |||
| 5 | # systemd-boot.bbclass - systemd-boot is essentially gummiboot merged into systemd. | ||
| 6 | # The original standalone gummiboot project is dead and no longer | ||
| 7 | # maintained. | ||
| 8 | # | ||
| 9 | # Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi | ||
| 10 | # (images built by image-live.bbclass) | ||
| 11 | |||
| 12 | do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy" | ||
| 13 | |||
| 14 | require conf/image-uefi.conf | ||
| 15 | # Need UUID utility code. | ||
| 16 | inherit fs-uuid | ||
| 17 | |||
| 18 | efi_populate() { | ||
| 19 | efi_populate_common "$1" systemd | ||
| 20 | |||
| 21 | # systemd-boot requires these paths for configuration files | ||
| 22 | # they are not customizable, so there is no point in new variables | ||
| 23 | install -d ${DEST}/loader | ||
| 24 | install -d ${DEST}/loader/entries | ||
| 25 | install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf | ||
| 26 | for i in ${SYSTEMD_BOOT_ENTRIES}; do | ||
| 27 | install -m 0644 ${i} ${DEST}/loader/entries | ||
| 28 | done | ||
| 29 | } | ||
| 30 | |||
| 31 | efi_iso_populate:append() { | ||
| 32 | cp -r $iso_dir/loader ${EFIIMGDIR} | ||
| 33 | } | ||
| 34 | |||
| 35 | inherit systemd-boot-cfg | ||
diff --git a/meta/classes-recipe/systemd.bbclass b/meta/classes-recipe/systemd.bbclass deleted file mode 100644 index 562e71fb56..0000000000 --- a/meta/classes-recipe/systemd.bbclass +++ /dev/null | |||
| @@ -1,303 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # The list of packages that should have systemd packaging scripts added. For | ||
| 8 | # each entry, optionally have a SYSTEMD_SERVICE:[package] that lists the service | ||
| 9 | # files in this package. If this variable isn't set, [package].service is used. | ||
| 10 | SYSTEMD_PACKAGES ?= "${PN}" | ||
| 11 | SYSTEMD_PACKAGES:class-native ?= "" | ||
| 12 | SYSTEMD_PACKAGES:class-nativesdk ?= "" | ||
| 13 | |||
| 14 | # Whether to enable or disable the services on installation. | ||
| 15 | SYSTEMD_AUTO_ENABLE ??= "enable" | ||
| 16 | |||
| 17 | # This class will be included in any recipe that supports systemd init scripts, | ||
| 18 | # even if systemd is not in DISTRO_FEATURES. As such, don't make any changes | ||
| 19 | # directly, but check the DISTRO_FEATURES first. | ||
| 20 | python __anonymous() { | ||
| 21 | # If the distro features have systemd but not sysvinit, inhibit update-rcd | ||
| 22 | # from doing any work so that pure-systemd images don't have redundant init | ||
| 23 | # files. | ||
| 24 | if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d): | ||
| 25 | d.appendVar("DEPENDS", " systemd-systemctl-native") | ||
| 26 | d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native") | ||
| 27 | if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d): | ||
| 28 | d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1") | ||
| 29 | } | ||
| 30 | |||
| 31 | systemd_postinst() { | ||
| 32 | if type systemctl >/dev/null 2>/dev/null; then | ||
| 33 | OPTS="" | ||
| 34 | |||
| 35 | if [ -n "$D" ]; then | ||
| 36 | OPTS="--root=$D" | ||
| 37 | fi | ||
| 38 | |||
| 39 | if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then | ||
| 40 | for service in ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}; do | ||
| 41 | systemctl ${OPTS} enable "$service" | ||
| 42 | done | ||
| 43 | |||
| 44 | for service in ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}; do | ||
| 45 | systemctl --global ${OPTS} enable "$service" | ||
| 46 | done | ||
| 47 | fi | ||
| 48 | |||
| 49 | if [ -z "$D" ] && systemctl >/dev/null 2>/dev/null; then | ||
| 50 | # Reload only system service manager | ||
| 51 | # --global for daemon-reload is not supported: https://github.com/systemd/systemd/issues/19284 | ||
| 52 | systemctl daemon-reload | ||
| 53 | [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}" ] && \ | ||
| 54 | systemctl preset ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)} | ||
| 55 | |||
| 56 | [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}" ] && \ | ||
| 57 | systemctl --global preset ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)} | ||
| 58 | |||
| 59 | if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then | ||
| 60 | # --global flag for restart is not supported by systemd (see above) | ||
| 61 | [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}" ] && \ | ||
| 62 | systemctl --no-block restart ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)} | ||
| 63 | fi | ||
| 64 | fi | ||
| 65 | fi | ||
| 66 | } | ||
| 67 | |||
| 68 | systemd_prerm() { | ||
| 69 | if type systemctl >/dev/null 2>/dev/null; then | ||
| 70 | if [ -z "$D" ] && systemctl >/dev/null 2>/dev/null; then | ||
| 71 | if [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)}" ]; then | ||
| 72 | systemctl stop ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)} | ||
| 73 | systemctl disable ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", False, d)} | ||
| 74 | fi | ||
| 75 | |||
| 76 | # same as above, --global flag is not supported for stop so do disable only | ||
| 77 | if [ -n "${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)}" ]; then | ||
| 78 | systemctl --global disable ${@systemd_filter_services("${SYSTEMD_SERVICE_ESCAPED}", True, d)} | ||
| 79 | fi | ||
| 80 | fi | ||
| 81 | fi | ||
| 82 | } | ||
| 83 | |||
| 84 | |||
| 85 | systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst" | ||
| 86 | systemd_populate_packages[vardepsexclude] += "OVERRIDES" | ||
| 87 | |||
| 88 | |||
| 89 | def systemd_service_path(service, searchpaths, d): | ||
| 90 | path_found = '' | ||
| 91 | |||
| 92 | # Deal with adding, for example, 'ifplugd@eth0.service' from | ||
| 93 | # 'ifplugd@.service' | ||
| 94 | base = None | ||
| 95 | at = service.find('@') | ||
| 96 | if at != -1: | ||
| 97 | ext = service.rfind('.') | ||
| 98 | base = service[:at] + '@' + service[ext:] | ||
| 99 | |||
| 100 | for path in searchpaths: | ||
| 101 | if os.path.lexists(oe.path.join(d.getVar("D"), path, service)): | ||
| 102 | path_found = path | ||
| 103 | break | ||
| 104 | elif base is not None: | ||
| 105 | if os.path.exists(oe.path.join(d.getVar("D"), path, base)): | ||
| 106 | path_found = path | ||
| 107 | break | ||
| 108 | |||
| 109 | return path_found, base | ||
| 110 | |||
| 111 | def systemd_service_searchpaths(user, d): | ||
| 112 | if user: | ||
| 113 | return [ | ||
| 114 | oe.path.join(d.getVar("sysconfdir"), "systemd", "user"), | ||
| 115 | d.getVar("systemd_user_unitdir"), | ||
| 116 | ] | ||
| 117 | else: | ||
| 118 | return [ | ||
| 119 | oe.path.join(d.getVar("sysconfdir"), "systemd", "system"), | ||
| 120 | d.getVar("systemd_system_unitdir"), | ||
| 121 | ] | ||
| 122 | |||
| 123 | def systemd_service_exists(service, user, d): | ||
| 124 | searchpaths = systemd_service_searchpaths(user, d) | ||
| 125 | path, _ = systemd_service_path(service, searchpaths, d) | ||
| 126 | |||
| 127 | return path != '' | ||
| 128 | |||
| 129 | def systemd_filter_services(services, user, d): | ||
| 130 | return ' '.join(service for service in services.split() if systemd_service_exists(service, user, d)) | ||
| 131 | |||
| 132 | python systemd_populate_packages() { | ||
| 133 | import re | ||
| 134 | import shlex | ||
| 135 | |||
| 136 | if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d): | ||
| 137 | return | ||
| 138 | |||
| 139 | def get_package_var(d, var, pkg): | ||
| 140 | val = (d.getVar('%s:%s' % (var, pkg)) or "").strip() | ||
| 141 | if val == "": | ||
| 142 | val = (d.getVar(var) or "").strip() | ||
| 143 | return val | ||
| 144 | |||
| 145 | # Check if systemd-packages already included in PACKAGES | ||
| 146 | def systemd_check_package(pkg_systemd): | ||
| 147 | packages = d.getVar('PACKAGES') | ||
| 148 | if not pkg_systemd in packages.split(): | ||
| 149 | bb.error('%s is marked for packaging systemd scripts, but it does not appear in the package list; please add it to PACKAGES or adjust SYSTEMD_PACKAGES accordingly' % pkg_systemd) | ||
| 150 | |||
| 151 | |||
| 152 | def systemd_generate_package_scripts(pkg): | ||
| 153 | bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg) | ||
| 154 | |||
| 155 | paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE:' + pkg).split()) | ||
| 156 | d.setVar('SYSTEMD_SERVICE_ESCAPED:' + pkg, paths_escaped) | ||
| 157 | |||
| 158 | # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE:pkg | ||
| 159 | # variable. | ||
| 160 | localdata = d.createCopy() | ||
| 161 | localdata.prependVar("OVERRIDES", pkg + ":") | ||
| 162 | |||
| 163 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 164 | if not postinst: | ||
| 165 | postinst = '#!/bin/sh\n' | ||
| 166 | postinst += localdata.getVar('systemd_postinst') | ||
| 167 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 168 | |||
| 169 | prerm = d.getVar('pkg_prerm:%s' % pkg) | ||
| 170 | if not prerm: | ||
| 171 | prerm = '#!/bin/sh\n' | ||
| 172 | prerm += localdata.getVar('systemd_prerm') | ||
| 173 | d.setVar('pkg_prerm:%s' % pkg, prerm) | ||
| 174 | |||
| 175 | |||
| 176 | # Add files to FILES:*-systemd if existent and not already done | ||
| 177 | def systemd_append_file(pkg_systemd, file_append): | ||
| 178 | appended = False | ||
| 179 | if os.path.exists(oe.path.join(d.getVar("D"), file_append)): | ||
| 180 | var_name = "FILES:" + pkg_systemd | ||
| 181 | files = d.getVar(var_name, False) or "" | ||
| 182 | if file_append not in files.split(): | ||
| 183 | d.appendVar(var_name, " " + file_append) | ||
| 184 | appended = True | ||
| 185 | return appended | ||
| 186 | |||
| 187 | # Add systemd files to FILES:*-systemd, parse for Also= and follow recursive | ||
| 188 | def systemd_add_files_and_parse(pkg_systemd, path, service): | ||
| 189 | # avoid infinite recursion | ||
| 190 | if systemd_append_file(pkg_systemd, oe.path.join(path, service)): | ||
| 191 | fullpath = oe.path.join(d.getVar("D"), path, service) | ||
| 192 | if service.find('.service') != -1: | ||
| 193 | # for *.service add *@.service | ||
| 194 | service_base = service.replace('.service', '') | ||
| 195 | systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service') | ||
| 196 | # Add the socket unit which is referred by the Also= in this service file to the same package. | ||
| 197 | with open(fullpath, 'r') as unit_f: | ||
| 198 | for line in unit_f: | ||
| 199 | if line.startswith('Also'): | ||
| 200 | also_unit = line.split('=', 1)[1].strip() | ||
| 201 | if also_unit.find('.socket') != -1: | ||
| 202 | systemd_add_files_and_parse(pkg_systemd, path, also_unit) | ||
| 203 | if service.find('.socket') != -1: | ||
| 204 | # for *.socket add *.service and *@.service | ||
| 205 | service_base = service.replace('.socket', '') | ||
| 206 | systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service') | ||
| 207 | systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service') | ||
| 208 | |||
| 209 | # Check service-files and call systemd_add_files_and_parse for each entry | ||
| 210 | def systemd_check_services(): | ||
| 211 | searchpaths = systemd_service_searchpaths(False, d) | ||
| 212 | searchpaths.extend(systemd_service_searchpaths(True, d)) | ||
| 213 | |||
| 214 | systemd_packages = d.getVar('SYSTEMD_PACKAGES') | ||
| 215 | |||
| 216 | # scan for all in SYSTEMD_SERVICE[] | ||
| 217 | for pkg_systemd in systemd_packages.split(): | ||
| 218 | for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split(): | ||
| 219 | path_found, base = systemd_service_path(service, searchpaths, d) | ||
| 220 | |||
| 221 | if path_found != '': | ||
| 222 | systemd_add_files_and_parse(pkg_systemd, path_found, service) | ||
| 223 | else: | ||
| 224 | bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format( | ||
| 225 | service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else "")) | ||
| 226 | |||
| 227 | def systemd_create_presets(pkg, action, user): | ||
| 228 | import re | ||
| 229 | |||
| 230 | # Check there is at least one service of given type (system/user), don't | ||
| 231 | # create empty files. | ||
| 232 | needs_preset = False | ||
| 233 | for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split(): | ||
| 234 | if systemd_service_exists(service, user, d): | ||
| 235 | needs_preset = True | ||
| 236 | break | ||
| 237 | |||
| 238 | if not needs_preset: | ||
| 239 | return | ||
| 240 | |||
| 241 | prefix = "user" if user else "system" | ||
| 242 | presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "%s-preset/98-%s.preset" % (prefix, pkg)) | ||
| 243 | bb.utils.mkdirhier(os.path.dirname(presetf)) | ||
| 244 | with open(presetf, 'a') as fd: | ||
| 245 | template_services = {} | ||
| 246 | for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split(): | ||
| 247 | if not systemd_service_exists(service, user, d): | ||
| 248 | continue | ||
| 249 | if '@' in service and '@.' not in service: | ||
| 250 | (servicename, postfix) = service.split('@') | ||
| 251 | (instance, service_type) = postfix.rsplit('.', 1) | ||
| 252 | template_services.setdefault(servicename + '@.' + service_type, []).append(instance) | ||
| 253 | else: | ||
| 254 | template_services.setdefault(service, []) | ||
| 255 | for template, instances in template_services.items(): | ||
| 256 | if instances: | ||
| 257 | fd.write("%s %s %s\n" % (action, template, ' '.join(instances))) | ||
| 258 | else: | ||
| 259 | fd.write("%s %s\n" % (action, template)) | ||
| 260 | d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "%s-preset/98-%s.preset" % (prefix, pkg))) | ||
| 261 | |||
| 262 | # Run all modifications once when creating package | ||
| 263 | if os.path.exists(d.getVar("D")): | ||
| 264 | for pkg in d.getVar('SYSTEMD_PACKAGES').split(): | ||
| 265 | systemd_check_package(pkg) | ||
| 266 | if d.getVar('SYSTEMD_SERVICE:' + pkg): | ||
| 267 | systemd_generate_package_scripts(pkg) | ||
| 268 | action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg) | ||
| 269 | if action in ("enable", "disable"): | ||
| 270 | systemd_create_presets(pkg, action, False) | ||
| 271 | systemd_create_presets(pkg, action, True) | ||
| 272 | elif action not in ("mask", "preset"): | ||
| 273 | bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action)) | ||
| 274 | systemd_check_services() | ||
| 275 | } | ||
| 276 | |||
| 277 | PACKAGESPLITFUNCS =+ "systemd_populate_packages" | ||
| 278 | |||
| 279 | rm_systemd_unitdir() { | ||
| 280 | rm -rf ${D}${systemd_unitdir} | ||
| 281 | # Change into ${D} and use a relative path with rmdir -p to avoid | ||
| 282 | # having it remove ${D} if it becomes empty. | ||
| 283 | (cd ${D} && rmdir -p $(dirname ${systemd_unitdir#/}) 2>/dev/null || :) | ||
| 284 | } | ||
| 285 | |||
| 286 | rm_sysvinit_initddir() { | ||
| 287 | local sysv_initddir=${INIT_D_DIR} | ||
| 288 | : ${sysv_initddir:=${sysconfdir}/init.d} | ||
| 289 | |||
| 290 | # If systemd_system_unitdir contains anything, delete sysv_initddir | ||
| 291 | if [ "$(ls -A ${D}${systemd_system_unitdir} 2>/dev/null)" ]; then | ||
| 292 | rm -rf ${D}$sysv_initddir | ||
| 293 | rmdir -p $(dirname ${D}$sysv_initddir) 2>/dev/null || : | ||
| 294 | fi | ||
| 295 | } | ||
| 296 | |||
| 297 | do_install[postfuncs] += "${RMINITDIR}" | ||
| 298 | RMINITDIR = " \ | ||
| 299 | ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '', 'rm_systemd_unitdir', d)} \ | ||
| 300 | ${@'rm_sysvinit_initddir' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \ | ||
| 301 | not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) else ''} \ | ||
| 302 | " | ||
| 303 | RMINITDIR:class-native = "" | ||
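The preset generation above folds instantiated template units back onto their template so that one preset line covers all instances. A standalone sketch of that grouping, with hypothetical unit names:

    template_services = {}
    for service in ["getty@tty1.service", "getty@tty2.service", "sshd.service"]:
        if "@" in service and "@." not in service:
            # "getty@tty1.service" -> template "getty@.service", instance "tty1"
            name, postfix = service.split("@")
            instance, stype = postfix.rsplit(".", 1)
            template_services.setdefault(name + "@." + stype, []).append(instance)
        else:
            template_services.setdefault(service, [])

    for template, instances in template_services.items():
        print("enable", template, *instances)
    # -> enable getty@.service tty1 tty2
    # -> enable sshd.service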
diff --git a/meta/classes-recipe/testexport.bbclass b/meta/classes-recipe/testexport.bbclass deleted file mode 100644 index 843d777e3b..0000000000 --- a/meta/classes-recipe/testexport.bbclass +++ /dev/null | |||
| @@ -1,183 +0,0 @@ | |||
| 1 | # Copyright (C) 2016 Intel Corporation | ||
| 2 | # | ||
| 3 | # SPDX-License-Identifier: MIT | ||
| 4 | # | ||
| 5 | # testexport.bbclass allows to execute runtime test outside OE environment. | ||
| 6 | # Most of the tests are commands run on target image over ssh. | ||
| 7 | # To use it add testexport to global inherit and call your target image with -c testexport | ||
| 8 | # You can try it out like this: | ||
| 9 | # - First build an image, e.g. core-image-sato | ||
| 10 | # - Add IMAGE_CLASSES += "testexport" in local.conf | ||
| 11 | # - Then bitbake core-image-sato -c testexport. That will generate the directory structure | ||
| 12 | # to execute the runtime tests using runexported.py. | ||
| 13 | # | ||
| 14 | # For more information on TEST_SUITES, check the testimage class. | ||
| 15 | |||
| 16 | inherit testimage | ||
| 17 | |||
| 18 | TEST_LOG_DIR ?= "${WORKDIR}/testexport" | ||
| 19 | TEST_EXPORT_DIR ?= "${TMPDIR}/testexport/${PN}" | ||
| 20 | TEST_EXPORT_PACKAGED_DIR ?= "packages/packaged" | ||
| 21 | TEST_EXPORT_EXTRACTED_DIR ?= "packages/extracted" | ||
| 22 | |||
| 23 | TEST_TARGET ?= "simpleremote" | ||
| 24 | TEST_TARGET_IP ?= "" | ||
| 25 | TEST_SERVER_IP ?= "" | ||
| 26 | |||
| 27 | require conf/testexport.conf | ||
| 28 | |||
| 29 | TEST_EXPORT_SDK_ENABLED ?= "0" | ||
| 30 | |||
| 31 | TEST_EXPORT_DEPENDS = "" | ||
| 32 | TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}" | ||
| 33 | TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock" | ||
| 34 | |||
| 35 | addtask testexport | ||
| 36 | do_testexport[nostamp] = "1" | ||
| 37 | do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}" | ||
| 38 | do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}" | ||
| 39 | |||
| 40 | python do_testexport() { | ||
| 41 | testexport_main(d) | ||
| 42 | } | ||
| 43 | |||
| 44 | def testexport_main(d): | ||
| 45 | import json | ||
| 46 | import logging | ||
| 47 | |||
| 48 | from oeqa.runtime.context import OERuntimeTestContext | ||
| 49 | from oeqa.runtime.context import OERuntimeTestContextExecutor | ||
| 50 | |||
| 51 | image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), | ||
| 52 | d.getVar('IMAGE_LINK_NAME') or d.getVar('IMAGE_NAME'))) | ||
| 53 | |||
| 54 | tdname = "%s.testdata.json" % image_name | ||
| 55 | td = json.load(open(tdname, "r")) | ||
| 56 | |||
| 57 | logger = logging.getLogger("BitBake") | ||
| 58 | |||
| 59 | target_kwargs = { } | ||
| 60 | target_kwargs['machine'] = d.getVar("MACHINE") or None | ||
| 61 | target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None | ||
| 62 | target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or "" | ||
| 63 | target_kwargs['serialcontrol_ps1'] = d.getVar("TEST_SERIALCONTROL_PS1") or None | ||
| 64 | target_kwargs['serialcontrol_connect_timeout'] = d.getVar("TEST_SERIALCONTROL_CONNECT_TIMEOUT") or None | ||
| 65 | |||
| 66 | target = OERuntimeTestContextExecutor.getTarget( | ||
| 67 | d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"), | ||
| 68 | d.getVar("TEST_SERVER_IP"), **target_kwargs) | ||
| 69 | |||
| 70 | image_manifest = "%s.manifest" % image_name | ||
| 71 | image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest) | ||
| 72 | |||
| 73 | extract_dir = d.getVar("TEST_EXTRACTED_DIR") | ||
| 74 | |||
| 75 | tc = OERuntimeTestContext(td, logger, target, image_packages, extract_dir) | ||
| 76 | |||
| 77 | copy_needed_files(d, tc) | ||
| 78 | |||
| 79 | def copy_needed_files(d, tc): | ||
| 80 | import shutil | ||
| 81 | import oe.path | ||
| 82 | |||
| 83 | from oeqa.utils.package_manager import _get_json_file | ||
| 84 | from oeqa.core.utils.test import getSuiteCasesFiles | ||
| 85 | |||
| 86 | export_path = d.getVar('TEST_EXPORT_DIR') | ||
| 87 | corebase_path = d.getVar('COREBASE') | ||
| 88 | bblayers = d.getVar('BBLAYERS').split() | ||
| 89 | |||
| 90 | # Clean everything before starting | ||
| 91 | oe.path.remove(export_path) | ||
| 92 | bb.utils.mkdirhier(os.path.join(export_path, 'lib', 'oeqa')) | ||
| 93 | |||
| 94 | # The source of files to copy are relative to 'COREBASE' directory | ||
| 95 | # The destination is relative to 'TEST_EXPORT_DIR' | ||
| 96 | # core files/dirs first | ||
| 97 | core_files_to_copy = [ os.path.join('scripts', 'oe-test'), | ||
| 98 | os.path.join('scripts', 'lib', 'argparse_oe.py'), | ||
| 99 | os.path.join('scripts', 'lib', 'scriptutils.py'), ] | ||
| 100 | for f in core_files_to_copy: | ||
| 101 | src = os.path.join(corebase_path, f) | ||
| 102 | dst = os.path.join(export_path, f.split('/', 1)[-1]) | ||
| 103 | if os.path.isdir(src): | ||
| 104 | oe.path.copytree(src, dst) | ||
| 105 | else: | ||
| 106 | shutil.copy2(src, dst) | ||
| 107 | |||
| 108 | # layer specific files/dirs | ||
| 109 | layer_files_to_copy = [ os.path.join('lib', 'oeqa', 'core'), | ||
| 110 | os.path.join('lib', 'oeqa', 'runtime'), | ||
| 111 | os.path.join('lib', 'oeqa', 'files'), | ||
| 112 | os.path.join('lib', 'oeqa', 'utils'),] | ||
| 113 | for layer in bblayers: | ||
| 114 | meta = os.path.basename(layer) | ||
| 115 | for f in layer_files_to_copy: | ||
| 116 | src = os.path.join(layer, f) | ||
| 117 | dst = os.path.join(export_path, meta, f) | ||
| 118 | if os.path.exists(src): | ||
| 119 | if os.path.isdir(src): | ||
| 120 | oe.path.copytree(src, dst) | ||
| 121 | else: | ||
| 122 | shutil.copy2(src, dst) | ||
| 123 | |||
| 124 | # Copy test data | ||
| 125 | image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), | ||
| 126 | d.getVar('IMAGE_LINK_NAME'))) | ||
| 127 | image_manifest = "%s.manifest" % image_name | ||
| 128 | tdname = "%s.testdata.json" % image_name | ||
| 129 | test_data_path = os.path.join(export_path, 'data') | ||
| 130 | bb.utils.mkdirhier(test_data_path) | ||
| 131 | shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest')) | ||
| 132 | shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json')) | ||
| 133 | |||
| 134 | for subdir, dirs, files in os.walk(export_path): | ||
| 135 | for dir in dirs: | ||
| 136 | if dir == '__pycache__': | ||
| 137 | shutil.rmtree(os.path.join(subdir, dir)) | ||
| 138 | |||
| 139 | # Create tar file for common parts of testexport | ||
| 140 | testexport_create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR")) | ||
| 141 | |||
| 142 | # Copy packages needed for runtime testing | ||
| 143 | test_paths = get_runtime_paths(d) | ||
| 144 | test_modules = d.getVar('TEST_SUITES').split() | ||
| 145 | tc.loadTests(test_paths, modules=test_modules) | ||
| 146 | package_extraction(d, tc.suites) | ||
| 147 | test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR") | ||
| 148 | if os.path.isdir(test_pkg_dir) and os.listdir(test_pkg_dir): | ||
| 149 | export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages") | ||
| 150 | oe.path.copytree(test_pkg_dir, export_pkg_dir) | ||
| 151 | # Create tar file for packages needed by the DUT | ||
| 152 | testexport_create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir) | ||
| 153 | |||
| 154 | # Copy SDK | ||
| 155 | if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1": | ||
| 156 | sdk_deploy = d.getVar("SDK_DEPLOY") | ||
| 157 | tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME") | ||
| 158 | tarball_path = os.path.join(sdk_deploy, tarball_name) | ||
| 159 | export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), | ||
| 160 | d.getVar("TEST_EXPORT_SDK_DIR")) | ||
| 161 | bb.utils.mkdirhier(export_sdk_dir) | ||
| 162 | shutil.copy2(tarball_path, export_sdk_dir) | ||
| 163 | |||
| 164 | # Create tar file for the sdk | ||
| 165 | testexport_create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir) | ||
| 166 | |||
| 167 | bb.plain("Exported tests to: %s" % export_path) | ||
| 168 | |||
| 169 | def testexport_create_tarball(d, tar_name, src_dir): | ||
| 170 | |||
| 171 | import tarfile | ||
| 172 | |||
| 173 | tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR"), tar_name) | ||
| 174 | current_dir = os.getcwd() | ||
| 175 | src_dir = src_dir.rstrip('/') | ||
| 176 | dir_name = os.path.dirname(src_dir) | ||
| 177 | base_name = os.path.basename(src_dir) | ||
| 178 | |||
| 179 | os.chdir(dir_name) | ||
| 180 | tar = tarfile.open(tar_path, "w:gz") | ||
| 181 | tar.add(base_name) | ||
| 182 | tar.close() | ||
| 183 | os.chdir(current_dir) | ||
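testexport_create_tarball() changes the working directory so that the archive contains only the basename of src_dir. The same effect can be achieved without the chdir dance by passing arcname; a minimal sketch, not the class's implementation:

    import os
    import tarfile

    def create_tarball(tar_path, src_dir):
        # Store src_dir in the archive under its basename, no chdir needed.
        src_dir = src_dir.rstrip('/')
        with tarfile.open(tar_path, "w:gz") as tar:
            tar.add(src_dir, arcname=os.path.basename(src_dir))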
diff --git a/meta/classes-recipe/testimage.bbclass b/meta/classes-recipe/testimage.bbclass deleted file mode 100644 index 847a6f18a8..0000000000 --- a/meta/classes-recipe/testimage.bbclass +++ /dev/null | |||
| @@ -1,488 +0,0 @@ | |||
| 1 | # Copyright (C) 2013 Intel Corporation | ||
| 2 | # | ||
| 3 | # SPDX-License-Identifier: MIT | ||
| 4 | |||
| 5 | inherit metadata_scm | ||
| 6 | inherit image-artifact-names | ||
| 7 | |||
| 8 | # testimage.bbclass enables testing of qemu images using python unittests. | ||
| 9 | # Most of the tests are commands run on the target image over ssh. | ||
| 10 | # To use it, add testimage to the global inherit list and call your target image with -c testimage | ||
| 11 | # You can try it out like this: | ||
| 12 | # - first add IMAGE_CLASSES += "testimage" in local.conf | ||
| 13 | # - build a qemu core-image-sato | ||
| 14 | # - then bitbake core-image-sato -c testimage. That will run a standard suite of tests. | ||
| 15 | # | ||
| 16 | # The tests can be run automatically each time an image is built if you set | ||
| 17 | # TESTIMAGE_AUTO = "1" | ||
| 18 | |||
| 19 | TESTIMAGE_AUTO ??= "0" | ||
| 20 | |||
| 21 | # When any test fails, TESTIMAGE_FAILED_QA_ARTIFACTS will be parsed and, for | ||
| 22 | # each entry in it, if the artifact pointed to by the path exists on the target, | ||
| 23 | # it will be retrieved onto the host. | ||
| 24 | |||
| 25 | TESTIMAGE_FAILED_QA_ARTIFACTS = "\ | ||
| 26 | ${localstatedir}/log \ | ||
| 27 | ${localstatedir}/volatile/log \ | ||
| 28 | ${sysconfdir}/version \ | ||
| 29 | ${sysconfdir}/os-release \ | ||
| 30 | ${nonarch_libdir}/os-release \ | ||
| 31 | " | ||
| 32 | |||
| 33 | # If some ptests are run and fail, retrieve corresponding directories | ||
| 34 | TESTIMAGE_FAILED_QA_ARTIFACTS += "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '${libdir}/*/ptest', '', d)}" | ||
| 35 | |||
| 36 | # You can set (or append to) TEST_SUITES in local.conf to select the tests | ||
| 37 | # which you want to run for your target. | ||
| 38 | # The test names are the module names in meta/lib/oeqa/runtime/cases. | ||
| 39 | # Each name in TEST_SUITES represents a required test for the image. (no skipping allowed) | ||
| 40 | # Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on it's own). | ||
| 41 | # Note that order in TEST_SUITES is relevant: tests are run in an order such that | ||
| 42 | # tests mentioned in @skipUnlessPassed run before the tests that depend on them, | ||
| 43 | # but without such dependencies, tests run in the order in which they are listed | ||
| 44 | # in TEST_SUITES. | ||
| 45 | # | ||
| 46 | # A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf. | ||
| 47 | |||
| 48 | # TEST_LOG_DIR contains an ssh command log and may contain information about what command is running, its output and return codes, and, for qemu, a boot log up to login. | ||
| 49 | # Booting is handled by this class, and it's not a test in itself. | ||
| 50 | # TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt. | ||
| 51 | # TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit). | ||
| 52 | # TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB. | ||
| 53 | # TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration. | ||
| 54 | # QEMU_USE_KVM can be set to "" to disable the use of kvm (by default it is enabled if target_arch == build_arch or both of them are x86 archs) | ||
| 55 | |||
| 56 | # TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting, | ||
| 57 | # if a pattern is not specifically present in this variable, a default will be used when booting the target. | ||
| 58 | # TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags | ||
| 59 | # e.g. normally the system boots and waits for a login prompt (login:); after that it sends the command: "root\n" to log in as the root user | ||
| 60 | # if we wanted to log in as the hypothetical "webserver" user for example we could set the following: | ||
| 61 | # TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded" | ||
| 62 | # TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n" | ||
| 63 | # TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#" | ||
| 64 | # The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished. | ||
| 65 | # They are prefixed with either search or send, to indicate whether the pattern is sent to, or searched for in, the target terminal | ||
| 66 | |||
| 67 | TEST_LOG_DIR ?= "${WORKDIR}/testimage" | ||
| 68 | |||
| 69 | TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}" | ||
| 70 | TEST_INSTALL_TMP_DIR ?= "${WORKDIR}/testimage/install_tmp" | ||
| 71 | TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages" | ||
| 72 | TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted" | ||
| 73 | TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged" | ||
| 74 | |||
| 75 | BASICTESTSUITE = "\ | ||
| 76 | ping date df ssh scp python perl gi ptest parselogs \ | ||
| 77 | logrotate connman systemd oe_syslog pam stap ldd xorg \ | ||
| 78 | kernelmodule gcc buildcpio buildlzip buildgalculator \ | ||
| 79 | dnf rpm opkg apt weston go rust" | ||
| 80 | |||
| 81 | DEFAULT_TEST_SUITES = "${BASICTESTSUITE}" | ||
| 82 | |||
| 83 | # musl doesn't support systemtap | ||
| 84 | DEFAULT_TEST_SUITES:remove:libc-musl = "stap" | ||
| 85 | |||
| 86 | # qemumips is quite slow and has reached the timeout limit several times on the YP build cluster; | ||
| 87 | # mitigate this by removing build tests for qemumips machines. | ||
| 88 | MIPSREMOVE ??= "buildcpio buildlzip buildgalculator" | ||
| 89 | DEFAULT_TEST_SUITES:remove:qemumips = "${MIPSREMOVE}" | ||
| 90 | DEFAULT_TEST_SUITES:remove:qemumips64 = "${MIPSREMOVE}" | ||
| 91 | |||
| 92 | TEST_SUITES ?= "${DEFAULT_TEST_SUITES}" | ||
| 93 | |||
| 94 | QEMU_USE_KVM ?= "1" | ||
| 95 | TEST_QEMUBOOT_TIMEOUT ?= "1000" | ||
| 96 | TEST_OVERALL_TIMEOUT ?= "" | ||
| 97 | TEST_TARGET ?= "qemu" | ||
| 98 | TEST_QEMUPARAMS ?= "" | ||
| 99 | TEST_RUNQEMUPARAMS ?= "" | ||
| 100 | |||
| 101 | TESTIMAGE_BOOT_PATTERNS ?= "" | ||
| 102 | |||
| 103 | TESTIMAGEDEPENDS = "" | ||
| 104 | TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot" | ||
| 105 | TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}" | ||
| 106 | TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}" | ||
| 107 | TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}" | ||
| 108 | TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}" | ||
| 109 | |||
| 110 | TESTIMAGELOCK = "${TMPDIR}/testimage.lock" | ||
| 111 | TESTIMAGELOCK:qemuall = "" | ||
| 112 | |||
| 113 | TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/" | ||
| 114 | |||
| 115 | TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR_IMAGE IMAGE_LINK_NAME IMAGE_NAME" | ||
| 116 | |||
| 117 | testimage_dump_monitor () { | ||
| 118 | query-status | ||
| 119 | query-block | ||
| 120 | dump-guest-memory {"paging":false,"protocol":"file:%s.img"} | ||
| 121 | } | ||
| 122 | |||
| 123 | python do_testimage() { | ||
| 124 | testimage_main(d) | ||
| 125 | } | ||
| 126 | |||
| 127 | addtask testimage | ||
| 128 | do_testimage[nostamp] = "1" | ||
| 129 | do_testimage[network] = "1" | ||
| 130 | do_testimage[depends] += "${TESTIMAGEDEPENDS}" | ||
| 131 | do_testimage[lockfiles] += "${TESTIMAGELOCK}" | ||
| 132 | |||
| 133 | def testimage_sanity(d): | ||
| 134 | if (d.getVar('TEST_TARGET') == 'simpleremote' | ||
| 135 | and (not d.getVar('TEST_TARGET_IP') | ||
| 136 | or not d.getVar('TEST_SERVER_IP'))): | ||
| 137 | bb.fatal('When TEST_TARGET is set to "simpleremote", ' | ||
| 138 | 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.') | ||
| 139 | |||
| 140 | def get_testimage_configuration(d, test_type, machine): | ||
| 141 | import platform | ||
| 142 | from oeqa.utils.metadata import get_layers | ||
| 143 | configuration = {'TEST_TYPE': test_type, | ||
| 144 | 'MACHINE': machine, | ||
| 145 | 'DISTRO': d.getVar("DISTRO"), | ||
| 146 | 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"), | ||
| 147 | 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"), | ||
| 148 | 'STARTTIME': d.getVar("DATETIME"), | ||
| 149 | 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'), | ||
| 150 | 'LAYERS': get_layers(d.getVar("BBLAYERS"))} | ||
| 151 | return configuration | ||
| 152 | get_testimage_configuration[vardepsexclude] = "DATETIME" | ||
| 153 | |||
| 154 | def get_testimage_result_id(configuration): | ||
| 155 | return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME']) | ||
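Plugging example values into the format above illustrates the result id a runtime run would produce (all values here are invented for illustration):

    configuration = {'TEST_TYPE': 'runtime', 'IMAGE_BASENAME': 'core-image-sato',
                     'MACHINE': 'qemux86-64', 'STARTTIME': '20240101120000'}
    result_id = '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'],
                                 configuration['MACHINE'], configuration['STARTTIME'])
    print(result_id)  # runtime_core-image-sato_qemux86-64_20240101120000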
| 156 | |||
| 157 | def get_testimage_boot_patterns(d): | ||
| 158 | from collections import defaultdict | ||
| 159 | boot_patterns = defaultdict(str) | ||
| 160 | # Only accept certain values | ||
| 161 | accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished'] | ||
| 162 | # Not all patterns need to be overridden, e.g. perhaps we only want to change the user | ||
| 163 | boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {} | ||
| 164 | if boot_patterns_flags: | ||
| 165 | patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()] | ||
| 166 | for flag, flagval in patterns_set: | ||
| 167 | if flag not in accepted_patterns: | ||
| 168 | bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt,send_login_user, \ | ||
| 169 | search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \ | ||
| 170 | contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS')) | ||
| 171 | return | ||
| 172 | boot_patterns[flag] = flagval.encode().decode('unicode-escape') | ||
| 173 | return boot_patterns | ||
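Note the encode().decode('unicode-escape') step: flag values are written as literal text in the metadata, so escape sequences such as \n must be turned into real control characters before being sent to the console. A small sketch of what that conversion does:

    # A flag value as it appears in the metadata: a literal backslash-n.
    raw = "webserver\\n"
    decoded = raw.encode().decode('unicode-escape')
    assert decoded == "webserver\n"   # now a real newline, ready to send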
| 174 | |||
| 175 | def testimage_main(d): | ||
| 176 | import os | ||
| 177 | import json | ||
| 178 | import signal | ||
| 179 | import logging | ||
| 180 | import shutil | ||
| 181 | |||
| 182 | from bb.utils import export_proxies | ||
| 183 | from oeqa.runtime.context import OERuntimeTestContext | ||
| 184 | from oeqa.runtime.context import OERuntimeTestContextExecutor | ||
| 185 | from oeqa.core.target.qemu import supported_fstypes | ||
| 186 | from oeqa.core.utils.test import getSuiteCases | ||
| 187 | from oeqa.utils import make_logger_bitbake_compatible | ||
| 188 | from oeqa.utils import get_json_result_dir | ||
| 189 | from oeqa.utils.postactions import run_failed_tests_post_actions | ||
| 190 | |||
| 191 | def sigterm_exception(signum, stackframe): | ||
| 192 | """ | ||
| 193 | Catch SIGTERM from worker in order to stop qemu. | ||
| 194 | """ | ||
| 195 | os.kill(os.getpid(), signal.SIGINT) | ||
| 196 | |||
| 197 | def handle_test_timeout(timeout): | ||
| 198 | bb.warn("Global test timeout reached (%s seconds), stopping the tests." %(timeout)) | ||
| 199 | os.kill(os.getpid(), signal.SIGINT) | ||
| 200 | |||
| 201 | testimage_sanity(d) | ||
| 202 | |||
| 203 | if (d.getVar('IMAGE_PKGTYPE') == 'rpm' | ||
| 204 | and ('dnf' in d.getVar('TEST_SUITES') or 'auto' in d.getVar('TEST_SUITES'))): | ||
| 205 | create_rpm_index(d) | ||
| 206 | |||
| 207 | logger = make_logger_bitbake_compatible(logging.getLogger("BitBake")) | ||
| 208 | pn = d.getVar("PN") | ||
| 209 | |||
| 210 | bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR")) | ||
| 211 | |||
| 212 | image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), | ||
| 213 | d.getVar('IMAGE_LINK_NAME') or d.getVar('IMAGE_NAME'))) | ||
| 214 | |||
| 215 | tdname = "%s.testdata.json" % image_name | ||
| 216 | try: | ||
| 217 | with open(tdname, "r") as f: | ||
| 218 | td = json.load(f) | ||
| 219 | except FileNotFoundError as err: | ||
| 220 | bb.fatal('File %s not found (%s).\nHave you built the image with IMAGE_CLASSES += "testimage" in the conf/local.conf?' % (tdname, err)) | ||
| 221 | |||
| 222 | # Some variables need to be updated (mostly paths) with the | ||
| 223 | # ones of the current environment because some tests require them. | ||
| 224 | for var in d.getVar('TESTIMAGE_UPDATE_VARS').split(): | ||
| 225 | td[var] = d.getVar(var) | ||
| 226 | td['ORIGPATH'] = d.getVar("BB_ORIGENV").getVar("PATH") | ||
| 227 | |||
| 228 | image_manifest = "%s.manifest" % image_name | ||
| 229 | image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest) | ||
| 230 | |||
| 231 | extract_dir = d.getVar("TEST_EXTRACTED_DIR") | ||
| 232 | |||
| 233 | # Get machine | ||
| 234 | machine = d.getVar("MACHINE") | ||
| 235 | |||
| 236 | # Get rootfs | ||
| 237 | fstypes = d.getVar('IMAGE_FSTYPES').split() | ||
| 238 | if d.getVar("TEST_TARGET") == "qemu": | ||
| 239 | fstypes = [fs for fs in fstypes if fs in supported_fstypes] | ||
| 240 | if not fstypes: | ||
| 241 | bb.fatal('Unsupported image type built. Add a compatible image to ' | ||
| 242 | 'IMAGE_FSTYPES. Supported types: %s' % | ||
| 243 | ', '.join(supported_fstypes)) | ||
| 244 | elif d.getVar("TEST_TARGET") == "serial": | ||
| 245 | bb.fatal('Serial target is currently only supported in testexport.') | ||
| 246 | qfstype = fstypes[0] | ||
| 247 | qdeffstype = d.getVar("QB_DEFAULT_FSTYPE") | ||
| 248 | if qdeffstype: | ||
| 249 | qfstype = qdeffstype | ||
| 250 | rootfs = '%s.%s' % (image_name, qfstype) | ||
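The rootfs filename is therefore image_name plus the first supported fstype, unless QB_DEFAULT_FSTYPE overrides the extension. A hedged sketch of that selection, with example values:

    # Sketch: pick the rootfs artifact the qemu target will boot.
    def pick_rootfs(image_name, fstypes, supported, qb_default=None):
        usable = [fs for fs in fstypes if fs in supported]
        if not usable:
            raise RuntimeError("no supported IMAGE_FSTYPES")
        qfstype = qb_default or usable[0]
        return '%s.%s' % (image_name, qfstype)

    print(pick_rootfs('core-image-sato-qemux86-64', ['wic', 'ext4'],
                      {'ext4', 'wic'}, qb_default='ext4'))
    # core-image-sato-qemux86-64.ext4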
| 251 | |||
| 252 | # Get tmpdir (not really used, just for compatibility) | ||
| 253 | tmpdir = d.getVar("TMPDIR") | ||
| 254 | |||
| 255 | # Get deploy_dir_image (not really used, just for compatibility) | ||
| 256 | dir_image = d.getVar("DEPLOY_DIR_IMAGE") | ||
| 257 | |||
| 258 | # Get bootlog | ||
| 259 | bootlog = os.path.join(d.getVar("TEST_LOG_DIR"), | ||
| 260 | 'qemu_boot_log.%s' % d.getVar('DATETIME')) | ||
| 261 | |||
| 262 | # Get display | ||
| 263 | display = d.getVar("BB_ORIGENV").getVar("DISPLAY") | ||
| 264 | |||
| 265 | # Get kernel | ||
| 266 | kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine)) | ||
| 267 | kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name) | ||
| 268 | |||
| 269 | # Get boottime | ||
| 270 | boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT")) | ||
| 271 | |||
| 272 | # Get use_kvm | ||
| 273 | kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH')) | ||
| 274 | |||
| 275 | # Get OVMF | ||
| 276 | ovmf = d.getVar("QEMU_USE_OVMF") | ||
| 277 | |||
| 278 | slirp = False | ||
| 279 | if bb.utils.contains('TEST_RUNQEMUPARAMS', 'slirp', True, False, d): | ||
| 280 | slirp = True | ||
| 281 | |||
| 282 | # TODO: We use the current implementation of qemu runner because of | ||
| 283 | # time constraints; the qemu runner really needs a refactor too. | ||
| 284 | target_kwargs = { 'machine' : machine, | ||
| 285 | 'rootfs' : rootfs, | ||
| 286 | 'tmpdir' : tmpdir, | ||
| 287 | 'dir_image' : dir_image, | ||
| 288 | 'display' : display, | ||
| 289 | 'kernel' : kernel, | ||
| 290 | 'boottime' : boottime, | ||
| 291 | 'bootlog' : bootlog, | ||
| 292 | 'kvm' : kvm, | ||
| 293 | 'slirp' : slirp, | ||
| 294 | 'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"), | ||
| 295 | 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()), | ||
| 296 | 'ovmf' : ovmf, | ||
| 297 | 'tmpfsdir' : d.getVar("RUNQEMU_TMPFS_DIR"), | ||
| 298 | } | ||
| 299 | |||
| 300 | if d.getVar("TESTIMAGE_BOOT_PATTERNS"): | ||
| 301 | target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d) | ||
| 302 | |||
| 303 | # hardware controlled targets might need further access | ||
| 304 | target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None | ||
| 305 | target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or "" | ||
| 306 | target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None | ||
| 307 | target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or "" | ||
| 308 | target_kwargs['testimage_dump_monitor'] = d.getVar("testimage_dump_monitor") or "" | ||
| 309 | |||
| 310 | def export_ssh_agent(d): | ||
| 311 | import os | ||
| 312 | |||
| 313 | variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK'] | ||
| 314 | for v in variables: | ||
| 315 | if v not in os.environ.keys(): | ||
| 316 | val = d.getVar(v) | ||
| 317 | if val is not None: | ||
| 318 | os.environ[v] = val | ||
| 319 | |||
| 320 | export_ssh_agent(d) | ||
| 321 | |||
| 322 | # runtime tests use the network to download projects for building | ||
| 323 | export_proxies(d) | ||
| 324 | |||
| 325 | if slirp: | ||
| 326 | # Default to 127.0.0.1 and let the runner identify the port forwarding | ||
| 327 | # (as OEQemuTarget does), but allow overriding. | ||
| 328 | target_ip = d.getVar("TEST_TARGET_IP") or "127.0.0.1" | ||
| 329 | # Default to 10.0.2.2 as this is the IP that the guest has with the | ||
| 330 | # default qemu slirp networking configuration, but allow overriding. | ||
| 331 | server_ip = d.getVar("TEST_SERVER_IP") or "10.0.2.2" | ||
| 332 | else: | ||
| 333 | target_ip = d.getVar("TEST_TARGET_IP") | ||
| 334 | server_ip = d.getVar("TEST_SERVER_IP") | ||
| 335 | |||
| 336 | # the robot dance | ||
| 337 | target = OERuntimeTestContextExecutor.getTarget( | ||
| 338 | d.getVar("TEST_TARGET"), logger, target_ip, | ||
| 339 | server_ip, **target_kwargs) | ||
| 340 | |||
| 341 | # test context | ||
| 342 | tc = OERuntimeTestContext(td, logger, target, image_packages, extract_dir) | ||
| 343 | |||
| 344 | # Load tests before starting the target | ||
| 345 | test_paths = get_runtime_paths(d) | ||
| 346 | test_modules = d.getVar('TEST_SUITES').split() | ||
| 347 | if not test_modules: | ||
| 348 | bb.fatal('Empty test suite, please verify TEST_SUITES variable') | ||
| 349 | |||
| 350 | tc.loadTests(test_paths, modules=test_modules) | ||
| 351 | |||
| 352 | suitecases = getSuiteCases(tc.suites) | ||
| 353 | if not suitecases: | ||
| 354 | bb.fatal('Empty test suite, please verify TEST_SUITES variable') | ||
| 355 | else: | ||
| 356 | bb.debug(2, 'test suites:\n\t%s' % '\n\t'.join([str(c) for c in suitecases])) | ||
| 357 | |||
| 358 | package_extraction(d, tc.suites) | ||
| 359 | |||
| 360 | results = None | ||
| 361 | complete = False | ||
| 362 | orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception) | ||
| 363 | try: | ||
| 364 | # We need to check if runqemu ends unexpectedly | ||
| 365 | # or if the worker send us a SIGTERM | ||
| 366 | tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS")) | ||
| 367 | import threading | ||
| 368 | try: | ||
| 369 | threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start() | ||
| 370 | except ValueError: | ||
| 371 | pass | ||
| 372 | results = tc.runTests() | ||
| 373 | complete = True | ||
| 374 | if results.hasAnyFailingTest(): | ||
| 375 | run_failed_tests_post_actions(d, tc) | ||
| 376 | except (KeyboardInterrupt, BlockingIOError) as err: | ||
| 377 | if isinstance(err, KeyboardInterrupt): | ||
| 378 | bb.error('testimage interrupted, shutting down...') | ||
| 379 | else: | ||
| 380 | bb.error('runqemu failed, shutting down...') | ||
| 381 | if results: | ||
| 382 | results.stop() | ||
| 383 | finally: | ||
| 384 | signal.signal(signal.SIGTERM, orig_sigterm_handler) | ||
| 385 | tc.target.stop() | ||
| 386 | |||
| 387 | # Show results (if we have them) | ||
| 388 | if results: | ||
| 389 | configuration = get_testimage_configuration(d, 'runtime', machine) | ||
| 390 | results.logDetails(get_json_result_dir(d), | ||
| 391 | configuration, | ||
| 392 | get_testimage_result_id(configuration), | ||
| 393 | dump_streams=d.getVar('TESTREPORT_FULLLOGS')) | ||
| 394 | results.logSummary(pn) | ||
| 395 | |||
| 396 | # Copy additional logs to tmp/log/oeqa so it's easier to find them | ||
| 397 | targetdir = os.path.join(get_json_result_dir(d), d.getVar("PN")) | ||
| 398 | os.makedirs(targetdir, exist_ok=True) | ||
| 399 | os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog))) | ||
| 400 | os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME')))) | ||
| 401 | |||
| 402 | if not results or not complete: | ||
| 403 | bb.error('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True) | ||
| 404 | if results and not results.wasSuccessful(): | ||
| 405 | bb.error('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True) | ||
| 406 | |||
| 407 | def get_runtime_paths(d): | ||
| 408 | """ | ||
| 409 | Returns a list of paths where runtime tests must reside. | ||
| 410 | |||
| 411 | Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/ | ||
| 412 | """ | ||
| 413 | paths = [] | ||
| 414 | |||
| 415 | for layer in d.getVar('BBLAYERS').split(): | ||
| 416 | path = os.path.join(layer, 'lib/oeqa/runtime/cases') | ||
| 417 | if os.path.isdir(path): | ||
| 418 | paths.append(path) | ||
| 419 | return paths | ||
| 420 | |||
| 421 | def create_index(arg): | ||
| 422 | import subprocess | ||
| 423 | |||
| 424 | index_cmd = arg | ||
| 425 | try: | ||
| 426 | bb.note("Executing '%s' ..." % index_cmd) | ||
| 427 | result = subprocess.check_output(index_cmd, | ||
| 428 | stderr=subprocess.STDOUT, | ||
| 429 | shell=True) | ||
| 430 | result = result.decode('utf-8') | ||
| 431 | except subprocess.CalledProcessError as e: | ||
| 432 | return("Index creation command '%s' failed with return code " | ||
| 433 | '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8"))) | ||
| 434 | if result: | ||
| 435 | bb.note(result) | ||
| 436 | return None | ||
| 437 | |||
| 438 | def create_rpm_index(d): | ||
| 439 | import glob | ||
| 440 | # Index RPMs | ||
| 441 | rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c") | ||
| 442 | index_cmds = [] | ||
| 443 | archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_') | ||
| 444 | |||
| 445 | for arch in archs.split(): | ||
| 446 | rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch) | ||
| 447 | idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch) | ||
| 448 | |||
| 449 | if not os.path.isdir(rpm_dir): | ||
| 450 | continue | ||
| 451 | |||
| 452 | lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock') | ||
| 453 | lf = bb.utils.lockfile(lockfilename, False) | ||
| 454 | oe.path.copyhardlinktree(rpm_dir, idx_path) | ||
| 455 | # Full indexes overload a 256MB image so reduce the number of rpms | ||
| 456 | # in the feed by filtering to specific packages needed by the tests. | ||
| 457 | package_list = glob.glob(idx_path + "*/*.rpm") | ||
| 458 | |||
| 459 | for pkg in package_list: | ||
| 460 | if not os.path.basename(pkg).startswith(("dnf-test-", "busybox", "update-alternatives", "libc6", "musl")): | ||
| 461 | bb.utils.remove(pkg) | ||
| 462 | |||
| 463 | bb.utils.unlockfile(lf) | ||
| 464 | cmd = '%s --update -q %s' % (rpm_createrepo, idx_path) | ||
| 465 | |||
| 466 | # Create repodata | ||
| 467 | result = create_index(cmd) | ||
| 468 | if result: | ||
| 469 | bb.fatal(result) | ||
| 470 | |||
| 471 | def package_extraction(d, test_suites): | ||
| 472 | from oeqa.utils.package_manager import find_packages_to_extract | ||
| 473 | from oeqa.utils.package_manager import extract_packages | ||
| 474 | |||
| 475 | bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True) | ||
| 476 | packages = find_packages_to_extract(test_suites) | ||
| 477 | if packages: | ||
| 478 | bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR")) | ||
| 479 | bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR")) | ||
| 480 | bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR")) | ||
| 481 | extract_packages(d, packages) | ||
| 482 | |||
| 483 | testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME" | ||
| 484 | |||
| 485 | python () { | ||
| 486 | if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"): | ||
| 487 | bb.build.addtask("testimage", "do_build", "do_image_complete", d) | ||
| 488 | } | ||
diff --git a/meta/classes-recipe/testsdk.bbclass b/meta/classes-recipe/testsdk.bbclass deleted file mode 100644 index b1c4fa67e6..0000000000 --- a/meta/classes-recipe/testsdk.bbclass +++ /dev/null | |||
| @@ -1,56 +0,0 @@ | |||
| 1 | # Copyright (C) 2013 - 2016 Intel Corporation | ||
| 2 | # | ||
| 3 | # SPDX-License-Identifier: MIT | ||
| 4 | |||
| 5 | # testsdk.bbclass enables testing of the SDK and the Extensible SDK | ||
| 6 | # | ||
| 7 | # To run SDK tests, run the commands: | ||
| 8 | # $ bitbake <image-name> -c populate_sdk | ||
| 9 | # $ bitbake <image-name> -c testsdk | ||
| 10 | # | ||
| 11 | # To run eSDK tests, run the commands: | ||
| 12 | # $ bitbake <image-name> -c populate_sdk_ext | ||
| 13 | # $ bitbake <image-name> -c testsdkext | ||
| 14 | # | ||
| 15 | # where "<image-name>" is an image like core-image-sato. | ||
| 16 | |||
| 17 | # List of test modules to run, or run all that can be found if unset | ||
| 18 | TESTSDK_SUITES ?= "" | ||
| 19 | |||
| 20 | TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK" | ||
| 21 | TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt" | ||
| 22 | TESTSDK_CASE_DIRS ?= "sdk" | ||
| 23 | |||
| 24 | def import_and_run(name, d): | ||
| 25 | import importlib | ||
| 26 | |||
| 27 | class_name = d.getVar(name) | ||
| 28 | if class_name: | ||
| 29 | module, cls = class_name.rsplit('.', 1) | ||
| 30 | m = importlib.import_module(module) | ||
| 31 | c = getattr(m, cls)() | ||
| 32 | c.run(d) | ||
| 33 | else: | ||
| 34 | bb.warn('No tests were run because %s did not define a class' % name) | ||
| 35 | |||
| 36 | import_and_run[vardepsexclude] = "DATETIME BB_ORIGENV" | ||
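import_and_run() resolves a dotted name such as the default oeqa.sdk.testsdk.TestSDK into a module plus class, instantiates it, and calls run(d). The same resolution logic, shown with a stdlib class so the sketch runs anywhere:

    import importlib

    # rsplit on the last dot: module path on the left, class name on the right.
    class_name = "json.JSONDecoder"
    module, cls = class_name.rsplit('.', 1)
    decoder = getattr(importlib.import_module(module), cls)()
    print(type(decoder).__name__)   # JSONDecoder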
| 37 | |||
| 38 | python do_testsdk() { | ||
| 39 | import_and_run('TESTSDK_CLASS_NAME', d) | ||
| 40 | } | ||
| 41 | addtask testsdk | ||
| 42 | do_testsdk[nostamp] = "1" | ||
| 43 | do_testsdk[network] = "1" | ||
| 44 | |||
| 45 | python do_testsdkext() { | ||
| 46 | import_and_run('TESTSDKEXT_CLASS_NAME', d) | ||
| 47 | } | ||
| 48 | addtask testsdkext | ||
| 49 | do_testsdkext[nostamp] = "1" | ||
| 50 | do_testsdkext[network] = "1" | ||
| 51 | |||
| 52 | python () { | ||
| 53 | if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"): | ||
| 54 | bb.build.addtask("testsdk", None, "do_populate_sdk", d) | ||
| 55 | bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d) | ||
| 56 | } | ||
diff --git a/meta/classes-recipe/texinfo.bbclass b/meta/classes-recipe/texinfo.bbclass deleted file mode 100644 index 380247faf5..0000000000 --- a/meta/classes-recipe/texinfo.bbclass +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class is inherited by recipes whose upstream packages invoke the | ||
| 8 | # texinfo utilities at build-time. Native and cross recipes are made to use the | ||
| 9 | # dummy scripts provided by texinfo-dummy-native, for improved performance. | ||
| 10 | # Target architecture recipes use the genuine Texinfo utilities. By default, | ||
| 11 | # they use the Texinfo utilities on the host system. If you want to use the | ||
| 12 | # Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and | ||
| 13 | # makeinfo from SANITY_REQUIRED_UTILITIES. | ||
| 14 | |||
| 15 | TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}" | ||
| 16 | TEXDEP:class-native = "texinfo-dummy-native" | ||
| 17 | TEXDEP:class-cross = "texinfo-dummy-native" | ||
| 18 | TEXDEP:class-crosssdk = "texinfo-dummy-native" | ||
| 19 | TEXDEP:class-cross-canadian = "texinfo-dummy-native" | ||
| 20 | DEPENDS:append = " ${TEXDEP}" | ||
| 21 | |||
| 22 | # libtool-cross doesn't inherit cross | ||
| 23 | TEXDEP:pn-libtool-cross = "texinfo-dummy-native" | ||
| 24 | |||
diff --git a/meta/classes-recipe/toolchain-scripts-base.bbclass b/meta/classes-recipe/toolchain-scripts-base.bbclass deleted file mode 100644 index d24a986e02..0000000000 --- a/meta/classes-recipe/toolchain-scripts-base.bbclass +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This function creates a version information file | ||
| 8 | toolchain_create_sdk_version () { | ||
| 9 | local versionfile=$1 | ||
| 10 | rm -f $versionfile | ||
| 11 | touch $versionfile | ||
| 12 | echo 'Distro: ${DISTRO}' >> $versionfile | ||
| 13 | echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile | ||
| 14 | echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile | ||
| 15 | echo 'Timestamp: ${DATETIME}' >> $versionfile | ||
| 16 | } | ||
| 17 | toolchain_create_sdk_version[vardepsexclude] = "DATETIME" | ||
diff --git a/meta/classes-recipe/toolchain-scripts.bbclass b/meta/classes-recipe/toolchain-scripts.bbclass deleted file mode 100644 index 8c062ef0e7..0000000000 --- a/meta/classes-recipe/toolchain-scripts.bbclass +++ /dev/null | |||
| @@ -1,249 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | inherit toolchain-scripts-base siteinfo kernel-arch meson-routines | ||
| 8 | |||
| 9 | # We want to be able to change the value of MULTIMACH_TARGET_SYS, because it | ||
| 10 | # doesn't always match our expectations... but we default to the stock value | ||
| 11 | REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}" | ||
| 12 | TARGET_CC_ARCH:append:toolchain-gcc:libc-musl = " -mmusl" | ||
| 13 | |||
| 14 | # default debug prefix map isn't valid in the SDK | ||
| 15 | DEBUG_PREFIX_MAP = "" | ||
| 16 | |||
| 17 | EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}" | ||
| 18 | |||
| 19 | def siteinfo_with_prefix(d, prefix): | ||
| 20 | # Return a prefixed value from siteinfo | ||
| 21 | for item in siteinfo_data_for_machine(d.getVar("TARGET_ARCH"), d.getVar("TARGET_OS"), d): | ||
| 22 | if item.startswith(prefix): | ||
| 23 | return item.replace(prefix, "") | ||
| 24 | raise KeyError | ||
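siteinfo_with_prefix() scans the siteinfo items for the first entry with a given prefix and returns the remainder, so "bit-32" looked up with prefix "bit-" yields "32". The core of that lookup, with example data:

    def with_prefix(items, prefix):
        # Return the first prefixed item with the prefix stripped.
        for item in items:
            if item.startswith(prefix):
                return item.replace(prefix, "")
        raise KeyError(prefix)

    siteinfo = ["endian-little", "bit-32", "common-linux"]
    assert with_prefix(siteinfo, "bit-") == "32"
    assert with_prefix(siteinfo, "endian-") == "little"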
| 25 | |||
| 26 | # This function creates an environment-setup-script for use in a deployable SDK | ||
| 27 | toolchain_create_sdk_env_script () { | ||
| 28 | # Create environment setup script. Remember that $SDKTARGETSYSROOT should | ||
| 29 | # only be expanded on the target at runtime. | ||
| 30 | base_sbindir=${10:-${base_sbindir_nativesdk}} | ||
| 31 | base_bindir=${9:-${base_bindir_nativesdk}} | ||
| 32 | sbindir=${8:-${sbindir_nativesdk}} | ||
| 33 | sdkpathnative=${7:-${SDKPATHNATIVE}} | ||
| 34 | prefix=${6:-${prefix_nativesdk}} | ||
| 35 | bindir=${5:-${bindir_nativesdk}} | ||
| 36 | libdir=${4:-${libdir}} | ||
| 37 | sysroot=${3:-${SDKTARGETSYSROOT}} | ||
| 38 | multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}} | ||
| 39 | script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys} | ||
| 40 | rm -f $script | ||
| 41 | touch $script | ||
| 42 | |||
| 43 | echo '# Check for LD_LIBRARY_PATH being set, which can break SDK and generally is a bad practice' >> $script | ||
| 44 | echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script | ||
| 45 | echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script | ||
| 46 | echo '# Only disable this check if you absolutely know what you are doing!' >> $script | ||
| 47 | echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script | ||
| 48 | echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script | ||
| 49 | echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script | ||
| 50 | echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script | ||
| 51 | echo ' echo "For more references see:"' >> $script | ||
| 52 | echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script | ||
| 53 | echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script | ||
| 54 | echo ' return 1' >> $script | ||
| 55 | echo 'fi' >> $script | ||
| 56 | |||
| 57 | echo "${EXPORT_SDK_PS1}" >> $script | ||
| 58 | echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script | ||
| 59 | EXTRAPATH="" | ||
| 60 | for i in ${CANADIANEXTRAOS}; do | ||
| 61 | EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i" | ||
| 62 | done | ||
| 63 | echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script | ||
| 64 | echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script | ||
| 65 | echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script | ||
| 66 | echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script | ||
| 67 | echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script | ||
| 68 | echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script | ||
| 69 | echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script | ||
| 70 | echo 'export OECORE_BASELIB="${baselib}"' >> $script | ||
| 71 | echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script | ||
| 72 | echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script | ||
| 73 | echo 'export OECORE_TARGET_BITS="${@siteinfo_with_prefix(d, 'bit-')}"' >>$script | ||
| 74 | echo 'export OECORE_TARGET_ENDIAN="${@siteinfo_with_prefix(d, 'endian-')}"' >>$script | ||
| 75 | echo 'export OECORE_MESON_HOST_SYSTEM="${@meson_operating_system('TARGET_OS', d)}"' >>$script | ||
| 76 | echo 'export OECORE_MESON_HOST_CPU_FAMILY="${@meson_cpu_family('TARGET_ARCH', d)}"' >>$script | ||
| 77 | echo 'export OECORE_MESON_HOST_CPU="${TARGET_ARCH}"' >>$script | ||
| 78 | echo 'export OECORE_MESON_HOST_ENDIAN="${@meson_endian('TARGET', d)}"' >>$script | ||
| 79 | |||
| 80 | echo 'unset command_not_found_handle' >> $script | ||
| 81 | |||
| 82 | toolchain_shared_env_script | ||
| 83 | } | ||
| 84 | |||
| 85 | # This function creates an environment-setup-script in B which enables | ||
| 86 | # an OE-core IDE to integrate with the build tree | ||
| 87 | # Caller must ensure CONFIG_SITE is set up | ||
| 88 | toolchain_create_tree_env_script () { | ||
| 89 | script=${B}/environment-setup-${REAL_MULTIMACH_TARGET_SYS} | ||
| 90 | bitbakedir=$(readlink -f ${BITBAKEPATH}/..) | ||
| 91 | rm -f $script | ||
| 92 | touch $script | ||
| 93 | echo 'standalone_sysroot_target="${STAGING_DIR}/${MACHINE}"' >> $script | ||
| 94 | echo 'standalone_sysroot_native="${STAGING_DIR}/${BUILD_ARCH}"' >> $script | ||
| 95 | echo "orig=`pwd`; cd ${COREBASE}; set ${TOPDIR} $bitbakedir; . ./oe-init-build-env; cd \$orig" >> $script | ||
| 96 | echo 'export PATH=$standalone_sysroot_native/${bindir_native}:$standalone_sysroot_native/${bindir_native}/${TARGET_SYS}:$PATH' >> $script | ||
| 97 | echo 'export PKG_CONFIG_SYSROOT_DIR=$standalone_sysroot_target' >> $script | ||
| 98 | echo 'export PKG_CONFIG_PATH=$standalone_sysroot_target'"$libdir"'/pkgconfig:$standalone_sysroot_target'"$prefix"'/share/pkgconfig' >> $script | ||
| 99 | echo 'export CONFIG_SITE="${CONFIG_SITE}"' >> $script | ||
| 100 | echo 'export SDKTARGETSYSROOT=$standalone_sysroot_target' >> $script | ||
| 101 | echo 'export OECORE_NATIVE_SYSROOT=$standalone_sysroot_native' >> $script | ||
| 102 | echo 'export OECORE_TARGET_SYSROOT=$standalone_sysroot_target' >> $script | ||
| 103 | echo 'export OECORE_ACLOCAL_OPTS="-I $standalone_sysroot_native/usr/share/aclocal"' >> $script | ||
| 104 | echo 'export OECORE_BASELIB="${baselib}"' >> $script | ||
| 105 | echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script | ||
| 106 | echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script | ||
| 107 | echo 'export OECORE_TARGET_BITS="${@siteinfo_with_prefix(d, 'bit-')}"' >>$script | ||
| 108 | echo 'export OECORE_TARGET_ENDIAN="${@siteinfo_with_prefix(d, 'endian-')}"' >>$script | ||
| 109 | echo 'export OECORE_MESON_HOST_SYSTEM="${@meson_operating_system('TARGET_OS', d)}"' >>$script | ||
| 110 | echo 'export OECORE_MESON_HOST_CPU_FAMILY="${@meson_cpu_family('TARGET_ARCH', d)}"' >>$script | ||
| 111 | echo 'export OECORE_MESON_HOST_CPU="${TARGET_ARCH}"' >>$script | ||
| 112 | echo 'export OECORE_MESON_HOST_ENDIAN="${@meson_endian('TARGET', d)}"' >>$script | ||
| 113 | |||
| 114 | toolchain_shared_env_script | ||
| 115 | |||
| 116 | cat >> $script <<EOF | ||
| 117 | |||
| 118 | if [ -d "\$OECORE_NATIVE_SYSROOT/${datadir}/post-relocate-setup.d/" ]; then | ||
| 119 | for s in \$OECORE_NATIVE_SYSROOT/${datadir}/post-relocate-setup.d/*; do | ||
| 120 | if [ ! -x \$s ]; then | ||
| 121 | continue | ||
| 122 | fi | ||
| 123 | \$s "\$1" | ||
| 124 | status=\$? | ||
| 125 | if [ \$status != 0 ]; then | ||
| 126 | echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2 | ||
| 127 | exit \$status | ||
| 128 | fi | ||
| 129 | done | ||
| 130 | fi | ||
| 131 | EOF | ||
| 132 | } | ||
| 133 | |||
| 134 | toolchain_shared_env_script () { | ||
| 135 | echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script | ||
| 136 | echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script | ||
| 137 | echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script | ||
| 138 | echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script | ||
| 139 | echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script | ||
| 140 | echo 'export GDB=${TARGET_PREFIX}gdb' >> $script | ||
| 141 | echo 'export STRIP=${TARGET_PREFIX}strip' >> $script | ||
| 142 | echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script | ||
| 143 | echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script | ||
| 144 | echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script | ||
| 145 | echo 'export READELF=${TARGET_PREFIX}readelf' >> $script | ||
| 146 | echo 'export AR=${TARGET_PREFIX}ar' >> $script | ||
| 147 | echo 'export NM=${TARGET_PREFIX}nm' >> $script | ||
| 148 | echo 'export M4=m4' >> $script | ||
| 149 | echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script | ||
| 150 | echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script | ||
| 151 | echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script | ||
| 152 | echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script | ||
| 153 | echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script | ||
| 154 | echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script | ||
| 155 | echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script | ||
| 156 | echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script | ||
| 157 | echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script | ||
| 158 | echo 'export ARCH=${ARCH}' >> $script | ||
| 159 | echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script | ||
| 160 | echo 'export OECORE_TUNE_CCARGS="${TUNE_CCARGS}"' >> $script | ||
| 161 | |||
| 162 | cat >> $script <<EOF | ||
| 163 | |||
| 164 | # Append environment subscripts | ||
| 165 | if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then | ||
| 166 | for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do | ||
| 167 | . \$envfile | ||
| 168 | done | ||
| 169 | fi | ||
| 170 | if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then | ||
| 171 | for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do | ||
| 172 | . \$envfile | ||
| 173 | done | ||
| 174 | fi | ||
| 175 | EOF | ||
| 176 | } | ||
| 177 | |||
| 178 | toolchain_create_post_relocate_script() { | ||
| 179 | relocate_script=$1 | ||
| 180 | env_dir=$2 | ||
| 181 | rm -f $relocate_script | ||
| 182 | touch $relocate_script | ||
| 183 | |||
| 184 | cat >> $relocate_script <<EOF | ||
| 185 | if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then | ||
| 186 | # Source top-level SDK env scripts in case they are needed for the relocate | ||
| 187 | # scripts. | ||
| 188 | for env_setup_script in ${env_dir}/environment-setup-*; do | ||
| 189 | . \$env_setup_script | ||
| 190 | status=\$? | ||
| 191 | if [ \$status != 0 ]; then | ||
| 192 | echo "\$0: Failed to source \$env_setup_script with status \$status" | ||
| 193 | exit \$status | ||
| 194 | fi | ||
| 195 | |||
| 196 | for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do | ||
| 197 | if [ ! -x \$s ]; then | ||
| 198 | continue | ||
| 199 | fi | ||
| 200 | \$s "\$1" | ||
| 201 | status=\$? | ||
| 202 | if [ \$status != 0 ]; then | ||
| 203 | echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2 | ||
| 204 | exit \$status | ||
| 205 | fi | ||
| 206 | done | ||
| 207 | done | ||
| 208 | rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d" | ||
| 209 | fi | ||
| 210 | EOF | ||
| 211 | } | ||
| 212 | |||
| 213 | # We get the cached site config at runtime | ||
| 214 | TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}" | ||
| 215 | TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses" | ||
| 216 | DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}" | ||
| 217 | |||
| 218 | # This function creates a site config file | ||
| 219 | toolchain_create_sdk_siteconfig () { | ||
| 220 | local siteconfig=$1 | ||
| 221 | |||
| 222 | rm -f $siteconfig | ||
| 223 | touch $siteconfig | ||
| 224 | |||
| 225 | for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do | ||
| 226 | cat $sitefile >> $siteconfig | ||
| 227 | done | ||
| 228 | |||
| 229 | #get cached site config | ||
| 230 | for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do | ||
| 231 | # Resolve virtual/* names to the real recipe name using sysroot-providers info | ||
| 232 | case $sitefile in virtual/*) | ||
| 233 | sitefile=`echo $sitefile | tr / _` | ||
| 234 | sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile` | ||
| 235 | esac | ||
| 236 | done | ||
| 237 | } | ||
| 238 | |||
| 239 | python __anonymous () { | ||
| 240 | import oe.classextend | ||
| 241 | deps = "" | ||
| 242 | prefixes = (d.getVar("MULTILIB_VARIANTS") or "").split() | ||
| 243 | for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split(): | ||
| 244 | deps += " %s:do_populate_sysroot" % dep | ||
| 245 | for variant in (d.getVar('MULTILIB_VARIANTS') or "").split(): | ||
| 246 | newdep = oe.classextend.add_suffix(dep, variant, prefixes) | ||
| 247 | deps += " %s:do_populate_sysroot" % newdep | ||
| 248 | d.appendVarFlag('do_configure', 'depends', deps) | ||
| 249 | } | ||
diff --git a/meta/classes-recipe/uboot-config.bbclass b/meta/classes-recipe/uboot-config.bbclass deleted file mode 100644 index fd6c045142..0000000000 --- a/meta/classes-recipe/uboot-config.bbclass +++ /dev/null | |||
| @@ -1,167 +0,0 @@ | |||
| 1 | # Handle U-Boot config for a machine | ||
| 2 | # | ||
| 3 | # The format to specify it, in the machine, is: | ||
| 4 | # | ||
| 5 | # UBOOT_CONFIG ??= <default> | ||
| 6 | # UBOOT_CONFIG[foo] = "config,images,binary" | ||
| 7 | # | ||
| 8 | # or | ||
| 9 | # | ||
| 10 | # UBOOT_MACHINE = "config" | ||
| 11 | # | ||
| 12 | # Copyright 2013, 2014 (C) O.S. Systems Software LTDA. | ||
| 13 | # | ||
| 14 | # SPDX-License-Identifier: MIT | ||
| 15 | |||
| 16 | |||
| 17 | def removesuffix(s, suffix): | ||
| 18 | if suffix and s.endswith(suffix): | ||
| 19 | return s[:-len(suffix)] | ||
| 20 | return s | ||
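This helper is a local equivalent of str.removesuffix(), which only became available in Python 3.9; keeping a local copy avoids raising the minimum interpreter version. For example:

    assert removesuffix("u-boot-spl.bin", ".bin") == "u-boot-spl"
    assert removesuffix("u-boot-spl.bin", ".img") == "u-boot-spl.bin"  # no match, unchanged
    assert removesuffix("u-boot-spl.bin", "") == "u-boot-spl.bin"      # empty suffix is a no-op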
| 21 | |||
| 22 | UBOOT_ENTRYPOINT ?= "0x20008000" | ||
| 23 | UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" | ||
| 24 | |||
| 25 | # When naming the files we install/deploy, the package version and revision | ||
| 26 | # are part of the filename. Create a single variable to represent this and | ||
| 27 | # allow it to be customized if desired. | ||
| 28 | UBOOT_VERSION ?= "${PV}-${PR}" | ||
| 29 | |||
| 30 | # Some versions of u-boot use .bin and others use .img. By default use .bin | ||
| 31 | # but enable individual recipes to change this value. | ||
| 32 | UBOOT_SUFFIX ??= "bin" | ||
| 33 | UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}" | ||
| 34 | UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}" | ||
| 35 | UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${UBOOT_VERSION}.${UBOOT_SUFFIX}" | ||
| 36 | UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}" | ||
| 37 | UBOOT_MAKE_TARGET ?= "all" | ||
| 38 | UBOOT_MAKE_OPTS ?= "" | ||
| 39 | |||
| 40 | # Output the ELF generated. Some platforms can use the ELF file and directly | ||
| 41 | # load it (JTAG booting, QEMU) additionally the ELF can be used for debugging | ||
| 42 | # purposes. | ||
| 43 | UBOOT_ELF ?= "" | ||
| 44 | UBOOT_ELF_SUFFIX ?= "elf" | ||
| 45 | UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${UBOOT_VERSION}.${UBOOT_ELF_SUFFIX}" | ||
| 46 | UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}" | ||
| 47 | UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}" | ||
| 48 | |||
| 49 | # Some versions of u-boot build an SPL (Second Program Loader) image that | ||
| 50 | # should be packaged along with the u-boot binary as well as placed in the | ||
| 51 | # deploy directory. For those versions they can set the following variables | ||
| 52 | # to allow packaging the SPL. | ||
| 53 | SPL_SUFFIX ?= "" | ||
| 54 | SPL_BINARY ?= "" | ||
| 55 | SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}" | ||
| 56 | SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}" | ||
| 57 | SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}" | ||
| 58 | SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${UBOOT_VERSION}${SPL_DELIMITER}${SPL_SUFFIX}" | ||
| 59 | SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}" | ||
| 60 | |||
| 61 | # Additional environment variables or a script can be installed alongside | ||
| 62 | # u-boot to be used automatically on boot. This file, typically 'uEnv.txt' | ||
| 63 | # or 'boot.scr', should be packaged along with u-boot as well as placed in the | ||
| 64 | # deploy directory. Machine configurations needing one of these files should | ||
| 65 | # include it in the SRC_URI and set the UBOOT_ENV parameter. | ||
| 66 | UBOOT_ENV_SUFFIX ?= "txt" | ||
| 67 | UBOOT_ENV ?= "" | ||
| 68 | UBOOT_ENV_SRC_SUFFIX ?= "cmd" | ||
| 69 | UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}" | ||
| 70 | UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}" | ||
| 71 | UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${UBOOT_VERSION}.${UBOOT_ENV_SUFFIX}" | ||
| 72 | UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}" | ||
| 73 | |||
| 74 | # U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf | ||
| 75 | # to find EXTLINUX conf file. | ||
| 76 | UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux" | ||
| 77 | UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf" | ||
| 78 | UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${UBOOT_VERSION}" | ||
| 79 | |||
| 80 | # Options for the device tree compiler passed to mkimage '-D' feature: | ||
| 81 | UBOOT_MKIMAGE_DTCOPTS ??= "" | ||
| 82 | SPL_MKIMAGE_DTCOPTS ??= "" | ||
| 83 | |||
| 84 | # mkimage command | ||
| 85 | UBOOT_MKIMAGE ?= "uboot-mkimage" | ||
| 86 | UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}" | ||
| 87 | |||
| 88 | # Signature activation | ||
| 89 | UBOOT_SIGN_ENABLE ?= "0" | ||
| 90 | |||
| 91 | # Arguments passed to mkimage for signing | ||
| 92 | UBOOT_MKIMAGE_SIGN_ARGS ?= "" | ||
| 93 | SPL_MKIMAGE_SIGN_ARGS ?= "" | ||
| 94 | |||
| 95 | # Options to deploy the u-boot device tree | ||
| 96 | UBOOT_DTB ?= "" | ||
| 97 | UBOOT_DTB_BINARY ??= "" | ||
| 98 | |||
| 99 | # uboot-fit_check_sign command | ||
| 100 | UBOOT_FIT_CHECK_SIGN ?= "uboot-fit_check_sign" | ||
| 101 | |||
| 102 | python () { | ||
| 103 | ubootmachine = d.getVar("UBOOT_MACHINE") | ||
| 104 | ubootconfigflags = d.getVarFlags('UBOOT_CONFIG') | ||
| 105 | ubootbinary = d.getVar('UBOOT_BINARY') | ||
| 106 | ubootbinaries = d.getVar('UBOOT_BINARIES') | ||
| 107 | ubootconfigmakeopts = d.getVar('UBOOT_CONFIG_MAKE_OPTS') | ||
| 108 | # The "doc" varflag is special, we don't want to see it here | ||
| 109 | ubootconfigflags.pop('doc', None) | ||
| 110 | ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split() | ||
| 111 | recipename = d.getVar("PN") | ||
| 112 | |||
| 113 | if not ubootmachine and not ubootconfig: | ||
| 114 | FILE = os.path.basename(d.getVar("FILE")) | ||
| 115 | bb.debug(1, "To build %s, see %s for instructions on " | ||
| 116 | "setting up your machine config" % (recipename, FILE)) | ||
| 117 | raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE")) | ||
| 118 | |||
| 119 | if ubootmachine and ubootconfig: | ||
| 120 | raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.") | ||
| 121 | |||
| 122 | if ubootconfigflags and ubootbinaries: | ||
| 123 | raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.") | ||
| 124 | |||
| 125 | if ubootconfigflags and ubootconfigmakeopts: | ||
| 126 | raise bb.parse.SkipRecipe("You cannot use UBOOT_CONFIG_MAKE_OPTS as it is internal to uboot_config.bbclass.") | ||
| 127 | |||
| 128 | if len(ubootconfig) > 0: | ||
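| | # Each UBOOT_CONFIG varflag holds up to four comma-separated fields, | ||
| | # "config,images,binary,make_opts", as enforced by the check below | ||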
| 129 | for config in ubootconfig: | ||
| 130 | found = False | ||
| 131 | for f, v in ubootconfigflags.items(): | ||
| 132 | if config == f: | ||
| 133 | found = True | ||
| 134 | items = v.split(',') | ||
| 135 | if items[0] and len(items) > 4: | ||
| 136 | raise bb.parse.SkipRecipe('Only config,images,binary,make_opts can be specified!') | ||
| 137 | d.appendVar('UBOOT_MACHINE', ' ' + items[0]) | ||
| 138 | # IMAGE_FSTYPES appending | ||
| 139 | if len(items) > 1 and items[1]: | ||
| 140 | bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1]) | ||
| 141 | d.appendVar('IMAGE_FSTYPES', ' ' + items[1]) | ||
| 142 | if len(items) > 2 and items[2]: | ||
| 143 | bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % items[2]) | ||
| 144 | d.appendVar('UBOOT_BINARIES', ' ' + items[2]) | ||
| 145 | else: | ||
| 146 | bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary) | ||
| 147 | d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary) | ||
| 148 | if len(items) > 3 and items[3]: | ||
| 149 | bb.debug(1, "Appending '%s' to UBOOT_CONFIG_MAKE_OPTS." % items[3]) | ||
| 150 | d.appendVar('UBOOT_CONFIG_MAKE_OPTS', items[3] + " ? ") | ||
| 151 | else: | ||
| 152 | bb.debug(1, "Appending '%s' to UBOOT_CONFIG_MAKE_OPTS." % "") | ||
| 153 | d.appendVar('UBOOT_CONFIG_MAKE_OPTS', " ? ") | ||
| 154 | break | ||
| 155 | |||
| 156 | if not found: | ||
| 157 | raise bb.parse.SkipRecipe("The selected UBOOT_CONFIG key %s has no match in %s." % (config, ubootconfigflags.keys())) | ||
| 158 | |||
| 159 | # This class might be inherited e.g. by the kernel recipe via kernel-fitimage.bbclass. | ||
| 160 | # Ensure the U-Boot specific menuconfig settings do not leak into other recipes. | ||
| 161 | if 'u-boot' in recipename: | ||
| 162 | if len(ubootconfig) == 1: | ||
| 163 | d.setVar('KCONFIG_CONFIG_ROOTDIR', os.path.join("${B}", d.getVar("UBOOT_MACHINE").strip())) | ||
| 164 | else: | ||
| 165 | # Disable menuconfig for multiple configs | ||
| 166 | d.setVar('KCONFIG_CONFIG_ENABLE_MENUCONFIG', "false") | ||
| 167 | } | ||
diff --git a/meta/classes-recipe/uboot-extlinux-config.bbclass b/meta/classes-recipe/uboot-extlinux-config.bbclass deleted file mode 100644 index 099476f5d6..0000000000 --- a/meta/classes-recipe/uboot-extlinux-config.bbclass +++ /dev/null | |||
| @@ -1,169 +0,0 @@ | |||
| 1 | # uboot-extlinux-config.bbclass | ||
| 2 | # | ||
| 3 | # This class allows generating an extlinux.conf for U-Boot use. U-Boot's | ||
| 4 | # support for it enables use of the Generic Distribution Configuration | ||
| 5 | # specification by OpenEmbedded-based products. | ||
| 6 | # | ||
| 7 | # External variables: | ||
| 8 | # | ||
| 9 | # UBOOT_EXTLINUX - Set to "1" to enable generation | ||
| 10 | # of extlinux.conf using this class. | ||
| 11 | # UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot | ||
| 12 | # default console. | ||
| 13 | # UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config. | ||
| 14 | # UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments. | ||
| 15 | # UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name. | ||
| 16 | # UBOOT_EXTLINUX_FDTDIR - Device tree directory. | ||
| 17 | # UBOOT_EXTLINUX_FDT - Device tree file. | ||
| 18 | # UBOOT_EXTLINUX_FDTOVERLAYS - Device tree overlay files. Space-separated list. | ||
| 19 | # UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to | ||
| 20 | # concatenate and use as an initrd (optional). | ||
| 21 | # UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description. | ||
| 22 | # UBOOT_EXTLINUX_ROOT - Root kernel cmdline. | ||
| 23 | # UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made. | ||
| 24 | # Measured in 1/10 of a second. | ||
| 25 | # UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after | ||
| 26 | # the timeout period. | ||
| 27 | # UBOOT_EXTLINUX_MENU_TITLE - Menu title. If empty, MENU TITLE entry | ||
| 28 | # will not be added to the output file. | ||
| 29 | # UBOOT_EXTLINUX_CONFIG - Output file. | ||
| 30 | # | ||
| 31 | # If there's only one label, the system will boot automatically and no menu | ||
| 32 | # will be created. If you want to use more than one label, e.g. linux and | ||
| 33 | # alternate, use overrides to set the menu description, console and other variables. | ||
| 34 | # | ||
| 35 | # Ex: | ||
| 36 | # | ||
| 37 | # UBOOT_EXTLINUX_LABELS ??= "default fallback" | ||
| 38 | # | ||
| 39 | # UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default" | ||
| 40 | # UBOOT_EXTLINUX_TIMEOUT ??= "30" | ||
| 41 | # | ||
| 42 | # UBOOT_EXTLINUX_KERNEL_IMAGE:default ??= "../zImage" | ||
| 43 | # UBOOT_EXTLINUX_MENU_DESCRIPTION:default ??= "Linux Default" | ||
| 44 | # | ||
| 45 | # UBOOT_EXTLINUX_KERNEL_IMAGE:fallback ??= "../zImage-fallback" | ||
| 46 | # UBOOT_EXTLINUX_MENU_DESCRIPTION:fallback ??= "Linux Fallback" | ||
| 47 | # | ||
| 48 | # Results: | ||
| 49 | # | ||
| 50 | # menu title Select the boot mode | ||
| 51 | # TIMEOUT 30 | ||
| 52 | # DEFAULT Linux Default | ||
| 53 | # LABEL Linux Default | ||
| 54 | # KERNEL ../zImage | ||
| 55 | # FDTDIR ../ | ||
| 56 | # APPEND root=/dev/mmcblk2p2 rootwait rw console=${console} | ||
| 57 | # LABEL Linux Fallback | ||
| 58 | # KERNEL ../zImage-fallback | ||
| 59 | # FDTDIR ../ | ||
| 60 | # APPEND root=/dev/mmcblk2p2 rootwait rw console=${console} | ||
| 61 | # | ||
| 62 | # Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved | ||
| 63 | # SPDX-License-Identifier: MIT | ||
| 64 | # | ||
| 65 | # The kernel has an internal default console, which you can override with | ||
| 66 | # a console=...some_tty... | ||
| 67 | UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}" | ||
| 68 | UBOOT_EXTLINUX_LABELS ??= "linux" | ||
| 69 | UBOOT_EXTLINUX_FDT ??= "" | ||
| 70 | UBOOT_EXTLINUX_FDTOVERLAYS ??= "" | ||
| 71 | UBOOT_EXTLINUX_FDTDIR ??= "../" | ||
| 72 | UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}" | ||
| 73 | UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw" | ||
| 74 | UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}" | ||
| 75 | UBOOT_EXTLINUX_MENU_TITLE ??= "Select the boot mode" | ||
| 76 | |||
| 77 | UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf" | ||
| 78 | |||
| 79 | python do_create_extlinux_config() { | ||
| 80 | if d.getVar("UBOOT_EXTLINUX") != "1": | ||
| 81 | return | ||
| 82 | |||
| 83 | if not d.getVar('WORKDIR'): | ||
| 84 | bb.error("WORKDIR not defined, unable to package") | ||
| 85 | |||
| 86 | labels = d.getVar('UBOOT_EXTLINUX_LABELS') | ||
| 87 | if not labels: | ||
| 88 | bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do") | ||
| 89 | |||
| 90 | if not labels.strip(): | ||
| 91 | bb.fatal("No labels, nothing to do") | ||
| 92 | |||
| 93 | cfile = d.getVar('UBOOT_EXTLINUX_CONFIG') | ||
| 94 | if not cfile: | ||
| 95 | bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG') | ||
| 96 | |||
| 97 | localdata = bb.data.createCopy(d) | ||
| 98 | |||
| 99 | try: | ||
| 100 | with open(cfile, 'w') as cfgfile: | ||
| 101 | cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n') | ||
| 102 | |||
| 103 | menu_title = localdata.getVar('UBOOT_EXTLINUX_MENU_TITLE') | ||
| 104 | if len(labels.split()) > 1 and menu_title: | ||
| 105 | cfgfile.write('MENU TITLE %s\n' % (menu_title)) | ||
| 106 | |||
| 107 | timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT') | ||
| 108 | if timeout: | ||
| 109 | cfgfile.write('TIMEOUT %s\n' % (timeout)) | ||
| 110 | |||
| 111 | if len(labels.split()) > 1: | ||
| 112 | default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL') | ||
| 113 | if default: | ||
| 114 | cfgfile.write('DEFAULT %s\n' % (default)) | ||
| 115 | |||
| 116 | # Need to deconflict the labels with existing overrides | ||
| 117 | label_overrides = labels.split() | ||
| 118 | default_overrides = localdata.getVar('OVERRIDES').split(':') | ||
| 119 | # We're keeping all the existing overrides that aren't used as a label; | ||
| 120 | # an override for each label is added back in while that label is processed | ||
| 121 | keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides)) | ||
| 122 | |||
| 123 | for label in labels.split(): | ||
| 124 | |||
| 125 | localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label])) | ||
| 126 | |||
| 127 | extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE') | ||
| 128 | |||
| 129 | menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION') | ||
| 130 | if not menu_description: | ||
| 131 | menu_description = label | ||
| 132 | |||
| 133 | root = localdata.getVar('UBOOT_EXTLINUX_ROOT') | ||
| 134 | if not root: | ||
| 135 | bb.fatal('UBOOT_EXTLINUX_ROOT not defined') | ||
| 136 | |||
| 137 | kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE') | ||
| 138 | fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR') | ||
| 139 | |||
| 140 | fdt = localdata.getVar('UBOOT_EXTLINUX_FDT') | ||
| 141 | fdtoverlays = localdata.getVar('UBOOT_EXTLINUX_FDTOVERLAYS') | ||
| 142 | |||
| 143 | cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image)) | ||
| 144 | |||
| 145 | if fdt: | ||
| 146 | cfgfile.write('\tFDT %s\n' % (fdt)) | ||
| 147 | elif fdtdir: | ||
| 148 | cfgfile.write('\tFDTDIR %s\n' % (fdtdir)) | ||
| 149 | |||
| 150 | if fdtoverlays: | ||
| 151 | cfgfile.write('\tFDTOVERLAYS %s\n' % (' '.join(fdtoverlays.split()))) | ||
| 152 | |||
| 153 | kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS') | ||
| 154 | |||
| 155 | initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD') | ||
| 156 | if initrd: | ||
| 157 | cfgfile.write('\tINITRD %s\n'% initrd) | ||
| 158 | |||
| 159 | kernel_args = root + " " + kernel_args | ||
| 160 | cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console)) | ||
| 161 | |||
| 162 | except OSError: | ||
| 163 | bb.fatal('Unable to open %s' % (cfile)) | ||
| 164 | } | ||
| 165 | UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT FDTOVERLAYS KERNEL_ARGS INITRD" | ||
| 166 | do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s:%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}" | ||
| 167 | do_create_extlinux_config[vardepsexclude] += "OVERRIDES" | ||
| 168 | |||
| 169 | addtask create_extlinux_config before do_install do_deploy after do_compile | ||
diff --git a/meta/classes-recipe/uboot-sign.bbclass b/meta/classes-recipe/uboot-sign.bbclass deleted file mode 100644 index 66b9698c1d..0000000000 --- a/meta/classes-recipe/uboot-sign.bbclass +++ /dev/null | |||
| @@ -1,631 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This file is part of U-Boot verified boot support and is intended to be | ||
| 8 | # inherited from the u-boot recipe. | ||
| 9 | # | ||
| 10 | # The signature procedure requires the user to generate an RSA key and | ||
| 11 | # certificate in a directory and to define the following variables: | ||
| 12 | # | ||
| 13 | # UBOOT_SIGN_KEYDIR = "/keys/directory" | ||
| 14 | # UBOOT_SIGN_KEYNAME = "dev" # key name in keydir (e.g. "dev.crt", "dev.key") | ||
| 15 | # UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000" | ||
| 16 | # UBOOT_SIGN_ENABLE = "1" | ||
| 17 | # | ||
| 18 | # As verified boot depends on fitImage generation, the following is also required: | ||
| 19 | # | ||
| 20 | # KERNEL_CLASSES ?= " kernel-fitimage " | ||
| 21 | # KERNEL_IMAGETYPE ?= "fitImage" | ||
| 22 | # | ||
| 23 | # The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot. | ||
| 24 | # | ||
| 25 | # For more details on signature process, please refer to U-Boot documentation. | ||
| 26 | |||
| 27 | # We need some variables from u-boot-config | ||
| 28 | inherit uboot-config | ||
| 29 | require conf/image-fitimage.conf | ||
| 30 | |||
| 31 | # Enable use of a U-Boot fitImage | ||
| 32 | UBOOT_FITIMAGE_ENABLE ?= "0" | ||
| 33 | |||
| 34 | # Signature activation - this requires UBOOT_FITIMAGE_ENABLE = "1" | ||
| 35 | SPL_SIGN_ENABLE ?= "0" | ||
| 36 | |||
| 37 | # Default values for deployment filenames. | ||
| 38 | UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb" | ||
| 39 | UBOOT_DTB_BINARY ?= "u-boot.dtb" | ||
| 40 | UBOOT_DTB_SIGNED ?= "${UBOOT_DTB_BINARY}-signed" | ||
| 41 | UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb" | ||
| 42 | UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin" | ||
| 43 | UBOOT_NODTB_BINARY ?= "u-boot-nodtb.bin" | ||
| 44 | UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.bin" | ||
| 45 | UBOOT_ITS_IMAGE ?= "u-boot-its-${MACHINE}-${PV}-${PR}" | ||
| 46 | UBOOT_ITS ?= "u-boot.its" | ||
| 47 | UBOOT_ITS_SYMLINK ?= "u-boot-its-${MACHINE}" | ||
| 48 | UBOOT_FITIMAGE_IMAGE ?= "u-boot-fitImage-${MACHINE}-${PV}-${PR}" | ||
| 49 | UBOOT_FITIMAGE_BINARY ?= "u-boot-fitImage" | ||
| 50 | UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}" | ||
| 51 | SPL_DIR ?= "spl" | ||
| 52 | SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb" | ||
| 53 | # When SPL is not used, set SPL_DTB_BINARY ?= "" to explicitly indicate | ||
| 54 | # that no SPL DTB should be created or signed. | ||
| 55 | SPL_DTB_BINARY ?= "u-boot-spl.dtb" | ||
| 56 | SPL_DTB_SIGNED ?= "${SPL_DTB_BINARY}-signed" | ||
| 57 | SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb" | ||
| 58 | SPL_NODTB_IMAGE ?= "u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin" | ||
| 59 | SPL_NODTB_BINARY ?= "u-boot-spl-nodtb.bin" | ||
| 60 | SPL_NODTB_SYMLINK ?= "u-boot-spl-nodtb-${MACHINE}.bin" | ||
| 61 | |||
| 62 | # U-Boot fitImage description | ||
| 63 | UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}" | ||
| 64 | |||
| 65 | # U-Boot fitImage Hash Algo | ||
| 66 | UBOOT_FIT_HASH_ALG ?= "sha256" | ||
| 67 | |||
| 68 | # U-Boot fitImage Signature Algo | ||
| 69 | UBOOT_FIT_SIGN_ALG ?= "rsa2048" | ||
| 70 | |||
| 71 | # Generate keys for signing U-Boot fitImage | ||
| 72 | UBOOT_FIT_GENERATE_KEYS ?= "0" | ||
| 73 | |||
| 74 | # Size of private keys in number of bits | ||
| 75 | UBOOT_FIT_SIGN_NUMBITS ?= "2048" | ||
| 76 | |||
| 77 | # args to openssl genrsa (Default is just the public exponent) | ||
| 78 | UBOOT_FIT_KEY_GENRSA_ARGS ?= "-F4" | ||
| 79 | |||
| 80 | # args to openssl req (Default is -batch for non-interactive mode and | ||
| 81 | # -new for a new certificate) | ||
| 82 | UBOOT_FIT_KEY_REQ_ARGS ?= "-batch -new" | ||
| 83 | |||
| 84 | # Standard format for public key certificate | ||
| 85 | UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509" | ||
| 86 | |||
| 87 | # length of an address in number of <u32> cells, | ||
| 88 | # e.g. 1 for 32-bit addresses, 2 for 64-bit addresses | ||
| 89 | UBOOT_FIT_ADDRESS_CELLS ?= "1" | ||
| 90 | |||
| 91 | # ARM Trusted Firmware (ATF) is a reference implementation of secure world | ||
| 92 | # software for Arm A-Profile architectures (Armv8-A and Armv7-A), including | ||
| 93 | # an Exception Level 3 (EL3) Secure Monitor. | ||
| 94 | UBOOT_FIT_ARM_TRUSTED_FIRMWARE ?= "0" | ||
| 95 | UBOOT_FIT_ARM_TRUSTED_FIRMWARE_IMAGE ?= "bl31.bin" | ||
| 96 | |||
| 97 | # A Trusted Execution Environment (TEE) is an environment for executing code, | ||
| 98 | # in which those executing the code can have high levels of trust in the asset | ||
| 99 | # management of that surrounding environment. | ||
| 100 | UBOOT_FIT_TEE ?= "0" | ||
| 101 | UBOOT_FIT_TEE_IMAGE ?= "tee-raw.bin" | ||
| 102 | |||
| 103 | # User specific settings | ||
| 104 | UBOOT_FIT_USER_SETTINGS ?= "" | ||
| 105 | |||
| 106 | # Sets the firmware property to select the image to boot first. | ||
| 107 | # If not set, the first entry in "loadables" is used instead. | ||
| 108 | UBOOT_FIT_CONF_FIRMWARE ?= "" | ||
| 109 | |||
| 110 | # Unit names of additional user-provided binaries to be added to the | ||
| 111 | # "loadables" list. It is a comma-separated list of quoted strings. | ||
| 112 | UBOOT_FIT_CONF_USER_LOADABLES ?= '' | ||
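| | # Since the value is appended verbatim after the default loadables, a | ||
| | # hypothetical example would be (note the leading comma): | ||
| | # UBOOT_FIT_CONF_USER_LOADABLES = ', "my-fw"' | ||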
| 113 | |||
| 114 | UBOOT_FIT_UBOOT_LOADADDRESS ?= "${UBOOT_LOADADDRESS}" | ||
| 115 | UBOOT_FIT_UBOOT_ENTRYPOINT ?= "${UBOOT_ENTRYPOINT}" | ||
| 116 | |||
| 117 | |||
| 118 | DEPENDS:append = " ${@'kernel-signing-keys-native' if d.getVar('FIT_GENERATE_KEYS') == '1' else ''}" | ||
| 119 | |||
| 120 | python() { | ||
| 121 | # We need u-boot-tools-native and dtc-native if we're creating or signing a U-Boot fitImage | ||
| 122 | sign = d.getVar('UBOOT_SIGN_ENABLE') == '1' | ||
| 123 | if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' or sign: | ||
| 124 | d.appendVar('DEPENDS', " u-boot-tools-native dtc-native") | ||
| 125 | } | ||
| 126 | |||
| 127 | concat_dtb() { | ||
| 128 | type="$1" | ||
| 129 | binary="$2" | ||
| 130 | |||
| 131 | if [ -e "${UBOOT_DTB_BINARY}" ]; then | ||
| 132 | # Signing individual images is not recommended as that | ||
| 133 | # makes the fitImage susceptible to a mix-and-match attack. | ||
| 134 | # | ||
| 135 | # OE FIT_SIGN_INDIVIDUAL is implemented in an unusual manner, | ||
| 136 | # where the resulting signed fitImage contains both signed | ||
| 137 | # images and signed configurations. This is redundant. In | ||
| 138 | # order to prevent a mix-and-match attack, it is sufficient | ||
| 139 | # to sign configurations. The FIT_SIGN_INDIVIDUAL = "1" | ||
| 140 | # support is kept to avoid breakage of existing layers, but | ||
| 141 | # it is highly recommended to avoid FIT_SIGN_INDIVIDUAL = "1", | ||
| 142 | # i.e. set FIT_SIGN_INDIVIDUAL = "0". | ||
| 143 | if [ "${FIT_SIGN_INDIVIDUAL}" = "1" ] ; then | ||
| 144 | # Sign dummy images in order to | ||
| 145 | # add the image signing keys to our dtb | ||
| 146 | ${UBOOT_MKIMAGE_SIGN} \ | ||
| 147 | ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ | ||
| 148 | -f auto \ | ||
| 149 | -k "${UBOOT_SIGN_KEYDIR}" \ | ||
| 150 | -o "${FIT_HASH_ALG},${FIT_SIGN_ALG}" \ | ||
| 151 | -g "${UBOOT_SIGN_IMG_KEYNAME}" \ | ||
| 152 | -K "${UBOOT_DTB_BINARY}" \ | ||
| 153 | -d /dev/null \ | ||
| 154 | -r ${B}/unused.itb \ | ||
| 155 | ${UBOOT_MKIMAGE_SIGN_ARGS} | ||
| 156 | fi | ||
| 157 | |||
| 158 | # Sign dummy image configurations in order to | ||
| 159 | # add the configuration signing keys to our dtb | ||
| 160 | ${UBOOT_MKIMAGE_SIGN} \ | ||
| 161 | ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \ | ||
| 162 | -f auto-conf \ | ||
| 163 | -k "${UBOOT_SIGN_KEYDIR}" \ | ||
| 164 | -o "${FIT_HASH_ALG},${FIT_SIGN_ALG}" \ | ||
| 165 | -g "${UBOOT_SIGN_KEYNAME}" \ | ||
| 166 | -K "${UBOOT_DTB_BINARY}" \ | ||
| 167 | -d /dev/null \ | ||
| 168 | -r ${B}/unused.itb \ | ||
| 169 | ${UBOOT_MKIMAGE_SIGN_ARGS} | ||
| 170 | |||
| 171 | # Verify the dummy fitImage signature against u-boot.dtb | ||
| 172 | # augmented using public key material. | ||
| 173 | # | ||
| 174 | # This only works for FIT_SIGN_INDIVIDUAL = "0", because | ||
| 175 | # mkimage -f auto-conf does not support -F to extend the | ||
| 176 | # existing unused.itb, and instead rewrites unused.itb | ||
| 177 | # from scratch. | ||
| 178 | # | ||
| 179 | # Using two separate unused.itb for mkimage -f auto and | ||
| 180 | # mkimage -f auto-conf invocation above would not help, as | ||
| 181 | # the signature verification process below checks whether | ||
| 182 | # all keys inserted into u-boot.dtb /signature node pass | ||
| 183 | # the verification. Separate unused.itb files would each miss one | ||
| 184 | # of the signatures. | ||
| 185 | # | ||
| 186 | # The FIT_SIGN_INDIVIDUAL = "1" support is kept to avoid | ||
| 187 | # breakage of existing layers, but it is highly recommended | ||
| 188 | # to not use FIT_SIGN_INDIVIDUAL = "1", i.e. set | ||
| 189 | # FIT_SIGN_INDIVIDUAL = "0". | ||
| 190 | if [ "${FIT_SIGN_INDIVIDUAL}" != "1" ] ; then | ||
| 191 | ${UBOOT_FIT_CHECK_SIGN} \ | ||
| 192 | -k "${UBOOT_DTB_BINARY}" \ | ||
| 193 | -f ${B}/unused.itb | ||
| 194 | fi | ||
| 195 | cp ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SIGNED} | ||
| 196 | fi | ||
| 197 | |||
| 198 | # If we're not using a signed u-boot fit, concatenate U-Boot w/o DTB & U-Boot DTB | ||
| 199 | # with the public key (otherwise U-Boot will be packaged by uboot_fitimage_assemble) | ||
| 200 | if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then | ||
| 201 | if [ ! -e "${UBOOT_DTB_BINARY}" ]; then | ||
| 202 | bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available." | ||
| 203 | return | ||
| 204 | fi | ||
| 205 | |||
| 206 | if [ "x${UBOOT_SUFFIX}" = "ximg" ] || [ "x${UBOOT_SUFFIX}" = "xrom" ]; then | ||
| 207 | oe_runmake EXT_DTB="${UBOOT_DTB_SIGNED}" ${UBOOT_MAKE_TARGET} | ||
| 208 | if [ -n "${binary}" ]; then | ||
| 209 | cp ${binary} ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX} | ||
| 210 | fi | ||
| 211 | elif [ -e "${UBOOT_NODTB_BINARY}" ]; then | ||
| 212 | if [ -n "${binary}" ]; then | ||
| 213 | cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} | tee ${binary} > \ | ||
| 214 | ${UBOOT_BINARYNAME}-${type}.${UBOOT_SUFFIX} | ||
| 215 | else | ||
| 216 | cat ${UBOOT_NODTB_BINARY} ${UBOOT_DTB_SIGNED} > ${UBOOT_BINARY} | ||
| 217 | fi | ||
| 218 | fi | ||
| 219 | fi | ||
| 220 | } | ||
| 221 | |||
| 222 | deploy_dtb() { | ||
| 223 | type="$1" | ||
| 224 | |||
| 225 | if [ -n "${type}" ]; then | ||
| 226 | uboot_dtb_binary="u-boot-${type}-${PV}-${PR}.dtb" | ||
| 227 | uboot_nodtb_binary="u-boot-nodtb-${type}-${PV}-${PR}.bin" | ||
| 228 | else | ||
| 229 | uboot_dtb_binary="${UBOOT_DTB_IMAGE}" | ||
| 230 | uboot_nodtb_binary="${UBOOT_NODTB_IMAGE}" | ||
| 231 | fi | ||
| 232 | |||
| 233 | if [ -e "${UBOOT_DTB_SIGNED}" ]; then | ||
| 234 | install -Dm644 ${UBOOT_DTB_SIGNED} ${DEPLOYDIR}/${uboot_dtb_binary} | ||
| 235 | if [ -n "${type}" ]; then | ||
| 236 | ln -sf ${uboot_dtb_binary} ${DEPLOYDIR}/${UBOOT_DTB_IMAGE} | ||
| 237 | fi | ||
| 238 | fi | ||
| 239 | |||
| 240 | if [ -f "${UBOOT_NODTB_BINARY}" ]; then | ||
| 241 | install -Dm644 ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${uboot_nodtb_binary} | ||
| 242 | if [ -n "${type}" ]; then | ||
| 243 | ln -sf ${uboot_nodtb_binary} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE} | ||
| 244 | fi | ||
| 245 | fi | ||
| 246 | } | ||
| 247 | |||
| 248 | concat_spl_dtb() { | ||
| 249 | if [ -e "${SPL_DIR}/${SPL_NODTB_BINARY}" ] && [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then | ||
| 250 | cat ${SPL_DIR}/${SPL_NODTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED} > "${SPL_BINARY}" | ||
| 251 | else | ||
| 252 | bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available." | ||
| 253 | fi | ||
| 254 | } | ||
| 255 | |||
| 256 | deploy_spl_dtb() { | ||
| 257 | type="$1" | ||
| 258 | |||
| 259 | if [ -n "${type}" ]; then | ||
| 260 | spl_dtb_binary="u-boot-spl-${type}-${PV}-${PR}.dtb" | ||
| 261 | spl_nodtb_binary="u-boot-spl-nodtb-${type}-${PV}-${PR}.bin" | ||
| 262 | else | ||
| 263 | spl_dtb_binary="${SPL_DTB_IMAGE}" | ||
| 264 | spl_nodtb_binary="${SPL_NODTB_IMAGE}" | ||
| 265 | fi | ||
| 266 | |||
| 267 | if [ -e "${SPL_DIR}/${SPL_DTB_SIGNED}" ] ; then | ||
| 268 | install -Dm644 ${SPL_DIR}/${SPL_DTB_SIGNED} ${DEPLOYDIR}/${spl_dtb_binary} | ||
| 269 | if [ -n "${type}" ]; then | ||
| 270 | ln -sf ${spl_dtb_binary} ${DEPLOYDIR}/${SPL_DTB_IMAGE} | ||
| 271 | fi | ||
| 272 | fi | ||
| 273 | |||
| 274 | if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then | ||
| 275 | install -Dm644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${spl_nodtb_binary} | ||
| 276 | if [ -n "${type}" ]; then | ||
| 277 | ln -sf ${spl_nodtb_binary} ${DEPLOYDIR}/${SPL_NODTB_IMAGE} | ||
| 278 | fi | ||
| 279 | fi | ||
| 280 | |||
| 281 | # For backwards compatibility... | ||
| 282 | install -Dm644 ${SPL_BINARY} ${DEPLOYDIR}/${SPL_IMAGE} | ||
| 283 | } | ||
| 284 | |||
| 285 | do_uboot_generate_rsa_keys() { | ||
| 286 | if [ "${SPL_SIGN_ENABLE}" = "0" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then | ||
| 287 | bbwarn "UBOOT_FIT_GENERATE_KEYS is set to 1 eventhough SPL_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used." | ||
| 288 | fi | ||
| 289 | |||
| 290 | if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then | ||
| 291 | |||
| 292 | # Generate keys only if they don't already exist | ||
| 293 | if [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key ] || \ | ||
| 294 | [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt ]; then | ||
| 295 | |||
| 296 | # make directory if it does not already exist | ||
| 297 | mkdir -p "${SPL_SIGN_KEYDIR}" | ||
| 298 | |||
| 299 | echo "Generating RSA private key for signing U-Boot fitImage" | ||
| 300 | openssl genrsa ${UBOOT_FIT_KEY_GENRSA_ARGS} -out \ | ||
| 301 | "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \ | ||
| 302 | "${UBOOT_FIT_SIGN_NUMBITS}" | ||
| 303 | |||
| 304 | echo "Generating certificate for signing U-Boot fitImage" | ||
| 305 | openssl req ${UBOOT_FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \ | ||
| 306 | -key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \ | ||
| 307 | -out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt | ||
| 308 | fi | ||
| 309 | fi | ||
| 310 | |||
| 311 | } | ||
| 312 | |||
| 313 | addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile | ||
| 314 | |||
| 315 | # Create an ITS file for the ATF | ||
| 316 | uboot_fitimage_atf() { | ||
| 317 | cat << EOF >> ${UBOOT_ITS} | ||
| 318 | atf { | ||
| 319 | description = "ARM Trusted Firmware"; | ||
| 320 | data = /incbin/("${UBOOT_FIT_ARM_TRUSTED_FIRMWARE_IMAGE}"); | ||
| 321 | type = "firmware"; | ||
| 322 | arch = "${UBOOT_ARCH}"; | ||
| 323 | os = "arm-trusted-firmware"; | ||
| 324 | load = <${UBOOT_FIT_ARM_TRUSTED_FIRMWARE_LOADADDRESS}>; | ||
| 325 | entry = <${UBOOT_FIT_ARM_TRUSTED_FIRMWARE_ENTRYPOINT}>; | ||
| 326 | compression = "none"; | ||
| 327 | EOF | ||
| 328 | if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then | ||
| 329 | cat << EOF >> ${UBOOT_ITS} | ||
| 330 | signature { | ||
| 331 | algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}"; | ||
| 332 | key-name-hint = "${SPL_SIGN_KEYNAME}"; | ||
| 333 | }; | ||
| 334 | EOF | ||
| 335 | fi | ||
| 336 | |||
| 337 | cat << EOF >> ${UBOOT_ITS} | ||
| 338 | }; | ||
| 339 | EOF | ||
| 340 | } | ||
| 341 | |||
| 342 | # Create an ITS file for the TEE | ||
| 343 | uboot_fitimage_tee() { | ||
| 344 | cat << EOF >> ${UBOOT_ITS} | ||
| 345 | tee { | ||
| 346 | description = "Trusted Execution Environment"; | ||
| 347 | data = /incbin/("${UBOOT_FIT_TEE_IMAGE}"); | ||
| 348 | type = "tee"; | ||
| 349 | arch = "${UBOOT_ARCH}"; | ||
| 350 | os = "tee"; | ||
| 351 | load = <${UBOOT_FIT_TEE_LOADADDRESS}>; | ||
| 352 | entry = <${UBOOT_FIT_TEE_ENTRYPOINT}>; | ||
| 353 | compression = "none"; | ||
| 354 | EOF | ||
| 355 | if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then | ||
| 356 | cat << EOF >> ${UBOOT_ITS} | ||
| 357 | signature { | ||
| 358 | algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}"; | ||
| 359 | key-name-hint = "${SPL_SIGN_KEYNAME}"; | ||
| 360 | }; | ||
| 361 | EOF | ||
| 362 | fi | ||
| 363 | |||
| 364 | cat << EOF >> ${UBOOT_ITS} | ||
| 365 | }; | ||
| 366 | EOF | ||
| 367 | } | ||
| 368 | |||
| 369 | # Create an ITS file for the U-Boot FIT, for use when | ||
| 370 | # we want to sign it so that the SPL can verify it | ||
| 371 | uboot_fitimage_assemble() { | ||
| 372 | conf_loadables="\"uboot\"" | ||
| 373 | conf_firmware="" | ||
| 374 | rm -f ${UBOOT_ITS} ${UBOOT_FITIMAGE_BINARY} | ||
| 375 | |||
| 376 | # First we create the ITS script | ||
| 377 | cat << EOF >> ${UBOOT_ITS} | ||
| 378 | /dts-v1/; | ||
| 379 | |||
| 380 | / { | ||
| 381 | description = "${UBOOT_FIT_DESC}"; | ||
| 382 | #address-cells = <${UBOOT_FIT_ADDRESS_CELLS}>; | ||
| 383 | |||
| 384 | images { | ||
| 385 | uboot { | ||
| 386 | description = "U-Boot image"; | ||
| 387 | data = /incbin/("${UBOOT_NODTB_BINARY}"); | ||
| 388 | type = "standalone"; | ||
| 389 | os = "u-boot"; | ||
| 390 | arch = "${UBOOT_ARCH}"; | ||
| 391 | compression = "none"; | ||
| 392 | load = <${UBOOT_FIT_UBOOT_LOADADDRESS}>; | ||
| 393 | entry = <${UBOOT_FIT_UBOOT_ENTRYPOINT}>; | ||
| 394 | EOF | ||
| 395 | |||
| 396 | if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then | ||
| 397 | cat << EOF >> ${UBOOT_ITS} | ||
| 398 | signature { | ||
| 399 | algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}"; | ||
| 400 | key-name-hint = "${SPL_SIGN_KEYNAME}"; | ||
| 401 | }; | ||
| 402 | EOF | ||
| 403 | fi | ||
| 404 | |||
| 405 | cat << EOF >> ${UBOOT_ITS} | ||
| 406 | }; | ||
| 407 | fdt { | ||
| 408 | description = "U-Boot FDT"; | ||
| 409 | data = /incbin/("${UBOOT_DTB_BINARY}"); | ||
| 410 | type = "flat_dt"; | ||
| 411 | arch = "${UBOOT_ARCH}"; | ||
| 412 | compression = "none"; | ||
| 413 | EOF | ||
| 414 | |||
| 415 | if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then | ||
| 416 | cat << EOF >> ${UBOOT_ITS} | ||
| 417 | signature { | ||
| 418 | algo = "${UBOOT_FIT_HASH_ALG},${UBOOT_FIT_SIGN_ALG}"; | ||
| 419 | key-name-hint = "${SPL_SIGN_KEYNAME}"; | ||
| 420 | }; | ||
| 421 | EOF | ||
| 422 | fi | ||
| 423 | |||
| 424 | cat << EOF >> ${UBOOT_ITS} | ||
| 425 | }; | ||
| 426 | EOF | ||
| 427 | if [ "${UBOOT_FIT_TEE}" = "1" ] ; then | ||
| 428 | conf_loadables="\"tee\", ${conf_loadables}" | ||
| 429 | uboot_fitimage_tee | ||
| 430 | fi | ||
| 431 | |||
| 432 | if [ "${UBOOT_FIT_ARM_TRUSTED_FIRMWARE}" = "1" ] ; then | ||
| 433 | conf_loadables="\"atf\", ${conf_loadables}" | ||
| 434 | uboot_fitimage_atf | ||
| 435 | fi | ||
| 436 | |||
| 437 | if [ -n "${UBOOT_FIT_USER_SETTINGS}" ] ; then | ||
| 438 | printf "%b" "${UBOOT_FIT_USER_SETTINGS}" >> ${UBOOT_ITS} | ||
| 439 | fi | ||
| 440 | |||
| 441 | if [ -n "${UBOOT_FIT_CONF_USER_LOADABLES}" ] ; then | ||
| 442 | conf_loadables="${conf_loadables}${UBOOT_FIT_CONF_USER_LOADABLES}" | ||
| 443 | fi | ||
| 444 | |||
| 445 | if [ -n "${UBOOT_FIT_CONF_FIRMWARE}" ] ; then | ||
| 446 | conf_firmware="firmware = \"${UBOOT_FIT_CONF_FIRMWARE}\";" | ||
| 447 | fi | ||
| 448 | |||
| 449 | cat << EOF >> ${UBOOT_ITS} | ||
| 450 | }; | ||
| 451 | |||
| 452 | configurations { | ||
| 453 | default = "conf"; | ||
| 454 | conf { | ||
| 455 | description = "Boot with signed U-Boot FIT"; | ||
| 456 | ${conf_firmware} | ||
| 457 | loadables = ${conf_loadables}; | ||
| 458 | fdt = "fdt"; | ||
| 459 | }; | ||
| 460 | }; | ||
| 461 | }; | ||
| 462 | EOF | ||
| 463 | |||
| 464 | # | ||
| 465 | # Assemble the U-Boot FIT image | ||
| 466 | # | ||
| 467 | ${UBOOT_MKIMAGE} \ | ||
| 468 | ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \ | ||
| 469 | -f ${UBOOT_ITS} \ | ||
| 470 | ${UBOOT_FITIMAGE_BINARY} | ||
| 471 | |||
| 472 | if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then | ||
| 473 | if [ -n "${SPL_DTB_BINARY}" ] ; then | ||
| 474 | # | ||
| 475 | # Sign the U-Boot FIT image and add the public key to the SPL dtb | ||
| 476 | # | ||
| 477 | ${UBOOT_MKIMAGE_SIGN} \ | ||
| 478 | ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \ | ||
| 479 | -F -k "${SPL_SIGN_KEYDIR}" \ | ||
| 480 | -K "${SPL_DIR}/${SPL_DTB_BINARY}" \ | ||
| 481 | -r ${UBOOT_FITIMAGE_BINARY} \ | ||
| 482 | ${SPL_MKIMAGE_SIGN_ARGS} | ||
| 483 | |||
| 484 | # Verify the U-Boot FIT image and SPL dtb | ||
| 485 | ${UBOOT_FIT_CHECK_SIGN} \ | ||
| 486 | -k "${SPL_DIR}/${SPL_DTB_BINARY}" \ | ||
| 487 | -f ${UBOOT_FITIMAGE_BINARY} | ||
| 488 | |||
| 489 | cp ${SPL_DIR}/${SPL_DTB_BINARY} ${SPL_DIR}/${SPL_DTB_SIGNED} | ||
| 490 | else | ||
| 491 | # Sign the U-Boot FIT image | ||
| 492 | ${UBOOT_MKIMAGE_SIGN} \ | ||
| 493 | ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \ | ||
| 494 | -F -k "${SPL_SIGN_KEYDIR}" \ | ||
| 495 | -r ${UBOOT_FITIMAGE_BINARY} \ | ||
| 496 | ${SPL_MKIMAGE_SIGN_ARGS} | ||
| 497 | fi | ||
| 498 | fi | ||
| 499 | } | ||
| 500 | |||
| 501 | uboot_assemble_fitimage_helper() { | ||
| 502 | type="$1" | ||
| 503 | binary="$2" | ||
| 504 | |||
| 505 | if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ -n "${UBOOT_DTB_BINARY}" ] ; then | ||
| 506 | concat_dtb "$type" "$binary" | ||
| 507 | fi | ||
| 508 | |||
| 509 | if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ]; then | ||
| 510 | uboot_fitimage_assemble | ||
| 511 | fi | ||
| 512 | |||
| 513 | if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ -n "${SPL_DTB_BINARY}" ] ; then | ||
| 514 | concat_spl_dtb | ||
| 515 | fi | ||
| 516 | } | ||
| 517 | |||
| 518 | do_uboot_assemble_fitimage() { | ||
| 519 | if [ -n "${UBOOT_CONFIG}" ]; then | ||
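| | # UBOOT_MACHINE, UBOOT_CONFIG and UBOOT_BINARIES are parallel lists; the | ||
| | # loops below pick the type and binary at the same index as the current config | ||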
| 520 | unset i | ||
| 521 | for config in ${UBOOT_MACHINE}; do | ||
| 522 | unset j k | ||
| 523 | i=$(expr $i + 1); | ||
| 524 | for type in ${UBOOT_CONFIG}; do | ||
| 525 | j=$(expr $j + 1); | ||
| 526 | if [ $j -eq $i ]; then | ||
| 527 | break; | ||
| 528 | fi | ||
| 529 | done | ||
| 530 | |||
| 531 | builddir="${config}-${type}" | ||
| 532 | |||
| 533 | for binary in ${UBOOT_BINARIES}; do | ||
| 534 | k=$(expr $k + 1); | ||
| 535 | if [ $k -eq $i ]; then | ||
| 536 | break; | ||
| 537 | fi | ||
| 538 | done | ||
| 539 | |||
| 540 | cd ${B}/${builddir} | ||
| 541 | uboot_assemble_fitimage_helper ${type} ${binary} | ||
| 542 | done | ||
| 543 | else | ||
| 544 | cd ${B} | ||
| 545 | uboot_assemble_fitimage_helper "" ${UBOOT_BINARY} | ||
| 546 | fi | ||
| 547 | } | ||
| 548 | |||
| 549 | addtask uboot_assemble_fitimage before do_install do_deploy after do_compile | ||
| 550 | |||
| 551 | deploy_helper() { | ||
| 552 | type="$1" | ||
| 553 | |||
| 554 | if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ -n "${UBOOT_DTB_SIGNED}" ] ; then | ||
| 555 | deploy_dtb $type | ||
| 556 | fi | ||
| 557 | |||
| 558 | if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ]; then | ||
| 559 | if [ -n "${type}" ]; then | ||
| 560 | uboot_its_image="u-boot-its-${type}-${PV}-${PR}" | ||
| 561 | uboot_fitimage_image="u-boot-fitImage-${type}-${PV}-${PR}" | ||
| 562 | else | ||
| 563 | uboot_its_image="${UBOOT_ITS_IMAGE}" | ||
| 564 | uboot_fitimage_image="${UBOOT_FITIMAGE_IMAGE}" | ||
| 565 | fi | ||
| 566 | |||
| 567 | install -Dm644 ${UBOOT_FITIMAGE_BINARY} ${DEPLOYDIR}/$uboot_fitimage_image | ||
| 568 | install -Dm644 ${UBOOT_ITS} ${DEPLOYDIR}/$uboot_its_image | ||
| 569 | |||
| 570 | if [ -n "${type}" ]; then | ||
| 571 | ln -sf $uboot_its_image ${DEPLOYDIR}/${UBOOT_ITS_IMAGE} | ||
| 572 | ln -sf $uboot_fitimage_image ${DEPLOYDIR}/${UBOOT_FITIMAGE_IMAGE} | ||
| 573 | fi | ||
| 574 | fi | ||
| 575 | |||
| 576 | if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ -n "${SPL_DTB_BINARY}" ] ; then | ||
| 577 | deploy_spl_dtb $type | ||
| 578 | fi | ||
| 579 | } | ||
| 580 | |||
| 581 | do_deploy:prepend() { | ||
| 582 | if [ -n "${UBOOT_CONFIG}" ]; then | ||
| 583 | unset i j k | ||
| 584 | for config in ${UBOOT_MACHINE}; do | ||
| 585 | i=$(expr $i + 1); | ||
| 586 | for type in ${UBOOT_CONFIG}; do | ||
| 587 | j=$(expr $j + 1); | ||
| 588 | if [ $j -eq $i ]; then | ||
| 589 | builddir="${config}-${type}" | ||
| 590 | cd ${B}/${builddir} | ||
| 591 | deploy_helper ${type} | ||
| 592 | fi | ||
| 593 | done | ||
| 594 | unset j | ||
| 595 | done | ||
| 596 | unset i | ||
| 597 | else | ||
| 598 | cd ${B} | ||
| 599 | deploy_helper "" | ||
| 600 | fi | ||
| 601 | |||
| 602 | if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ -n "${UBOOT_DTB_BINARY}" ] ; then | ||
| 603 | ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY} | ||
| 604 | ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK} | ||
| 605 | ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK} | ||
| 606 | ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY} | ||
| 607 | fi | ||
| 608 | |||
| 609 | if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then | ||
| 610 | ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS} | ||
| 611 | ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS_SYMLINK} | ||
| 612 | ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY} | ||
| 613 | ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK} | ||
| 614 | fi | ||
| 615 | |||
| 616 | if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ -n "${SPL_DTB_BINARY}" ] ; then | ||
| 617 | ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK} | ||
| 618 | ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY} | ||
| 619 | ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK} | ||
| 620 | ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY} | ||
| 621 | fi | ||
| 622 | } | ||
| 623 | |||
| 624 | do_deploy:append() { | ||
| 625 | # If we're creating a u-boot fitImage, point the u-boot.bin | ||
| 626 | # symlink at it, since it might get used by image recipes | ||
| 627 | if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then | ||
| 628 | ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_BINARY} | ||
| 629 | ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_SYMLINK} | ||
| 630 | fi | ||
| 631 | } | ||
diff --git a/meta/classes-recipe/uki.bbclass b/meta/classes-recipe/uki.bbclass deleted file mode 100644 index fedff222c6..0000000000 --- a/meta/classes-recipe/uki.bbclass +++ /dev/null | |||
| @@ -1,194 +0,0 @@ | |||
| 1 | # Unified kernel image (UKI) class | ||
| 2 | # | ||
| 3 | # This bbclass merges the kernel, initrd etc. into a standard UKI UEFI binary, | ||
| 4 | # to be loaded by UEFI firmware and systemd-boot on the target HW. | ||
| 5 | # TPM PCR pre-calculation is not supported since the systemd-measure tooling | ||
| 6 | # is meant to run on the target, not in a cross-compile environment. | ||
| 7 | # | ||
| 8 | # See: | ||
| 9 | # https://www.freedesktop.org/software/systemd/man/latest/ukify.html | ||
| 10 | # https://uapi-group.org/specifications/specs/unified_kernel_image/ | ||
| 11 | # | ||
| 12 | # The UKI contains: | ||
| 13 | # | ||
| 14 | # - UEFI stub | ||
| 15 | # The Linux kernel can generate a UEFI stub; however, the one from systemd-boot can fetch | ||
| 16 | # the command line from a separate section of the EFI application, avoiding the need to | ||
| 17 | # rebuild the kernel. | ||
| 18 | # - kernel | ||
| 19 | # - initramfs | ||
| 20 | # - kernel command line | ||
| 21 | # - uname -r kernel version | ||
| 22 | # - /etc/os-release to create a boot menu with version details | ||
| 23 | # - optionally secure boot signature(s) | ||
| 24 | # - other metadata (e.g. TPM PCR measurements) | ||
| 25 | # | ||
| 26 | # Usage instructions: | ||
| 27 | # | ||
| 28 | # - requires UEFI-compatible firmware on the target, e.g. the u-boot based | ||
| 29 | # qemuarm64-secureboot from meta-arm, or ovmf/edk2 based firmware for qemux86/x86_64 | ||
| 30 | # | ||
| 31 | # - Distro/build config: | ||
| 32 | # | ||
| 33 | # INIT_MANAGER = "systemd" | ||
| 34 | # MACHINE_FEATURES:append = " efi" | ||
| 35 | # EFI_PROVIDER = "systemd-boot" | ||
| 36 | # INITRAMFS_IMAGE = "core-image-minimal-initramfs" | ||
| 37 | # | ||
| 38 | # - image recipe: | ||
| 39 | # | ||
| 40 | # inherit uki | ||
| 41 | # | ||
| 42 | # - qemuboot/runqemu changes in image recipe or build config: | ||
| 43 | # | ||
| 44 | # # Kernel command line must be inside the signed uki | ||
| 45 | # QB_KERNEL_ROOT = "" | ||
| 46 | # # kernel is in the uki image, not loaded separately | ||
| 47 | # QB_DEFAULT_KERNEL = "none" | ||
| 48 | # | ||
| 49 | # - for UEFI secure boot, systemd-boot and the uki (including kernel) can | ||
| 50 | # be signed, but this requires sbsign-tool-native (recipe available from | ||
| 51 | # meta-secure-core, see also qemuarm64-secureboot from meta-arm). Set the | ||
| 52 | # variable UKI_SB_KEY to the path of the private key and UKI_SB_CERT to the certificate. | ||
| 53 | # Note that systemd-boot also needs to be signed with the same key. | ||
| 54 | # | ||
| 55 | # - at runtime, UEFI firmware will load and boot systemd-boot which | ||
| 56 | # creates a menu from all detected uki binaries. No need to manually | ||
| 57 | # set up boot menu entries. | ||
| 58 | # | ||
| 59 | # - see efi-uki-bootdisk.wks.in for how to create the ESP partition which hosts systemd-boot, | ||
| 60 | # config file(s) for systemd-boot and the UKI binaries. | ||
| 61 | # | ||
| 62 | |||
| 63 | DEPENDS += "\ | ||
| 64 | os-release \ | ||
| 65 | systemd-boot \ | ||
| 66 | systemd-boot-native \ | ||
| 67 | virtual/cross-binutils \ | ||
| 68 | virtual/kernel \ | ||
| 69 | " | ||
| 70 | |||
| 71 | inherit image-artifact-names | ||
| 72 | require ../conf/image-uefi.conf | ||
| 73 | |||
| 74 | INITRAMFS_IMAGE ?= "core-image-minimal-initramfs" | ||
| 75 | |||
| 76 | INITRD_ARCHIVE ?= "${INITRAMFS_IMAGE}-${MACHINE}.${INITRAMFS_FSTYPES}" | ||
| 77 | |||
| 78 | do_image_complete[depends] += "${INITRAMFS_IMAGE}:do_image_complete" | ||
| 79 | |||
| 80 | UKIFY_CMD ?= "ukify build" | ||
| 81 | UKI_CONFIG_FILE ?= "${UNPACKDIR}/uki.conf" | ||
| 82 | UKI_FILENAME ?= "uki.efi" | ||
| 83 | UKI_KERNEL_FILENAME ?= "${KERNEL_IMAGETYPE}" | ||
| 84 | UKI_CMDLINE ?= "rootwait root=LABEL=root" | ||
| 85 | # secure boot keys and cert, needs sbsign-tools-native (meta-secure-core) | ||
| 86 | #UKI_SB_KEY ?= "" | ||
| 87 | #UKI_SB_CERT ?= "" | ||
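| | # A hypothetical example (key/cert paths are placeholders): | ||
| | #UKI_SB_KEY = "/path/to/keys/db.key" | ||
| | #UKI_SB_CERT = "/path/to/keys/db.crt" | ||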
| 88 | |||
| 89 | IMAGE_EFI_BOOT_FILES ?= "${UKI_FILENAME};EFI/Linux/${UKI_FILENAME}" | ||
| 90 | |||
| 91 | do_uki[depends] += " \ | ||
| 92 | systemd-boot:do_deploy \ | ||
| 93 | virtual/kernel:do_deploy \ | ||
| 94 | " | ||
| 95 | do_uki[depends] += "${@ '${INITRAMFS_IMAGE}:do_image_complete' if d.getVar('INITRAMFS_IMAGE') else ''}" | ||
| 96 | |||
| 97 | # ensure that the build directory is empty every time we generate a new uki | ||
| 98 | do_uki[cleandirs] = "${B}" | ||
| 99 | # run the task from inside the build directory | ||
| 100 | do_uki[dirs] = "${B}" | ||
| 101 | |||
| 102 | # we want to allow specifying files in SRC_URI, such as for signing the UKI | ||
| 103 | python () { | ||
| 104 | d.delVarFlag("do_fetch","noexec") | ||
| 105 | d.delVarFlag("do_unpack","noexec") | ||
| 106 | } | ||
| 107 | |||
| 108 | # main task | ||
| 109 | python do_uki() { | ||
| 110 | import glob | ||
| 111 | import bb.process | ||
| 112 | |||
| 113 | # base ukify command, can be extended if needed | ||
| 114 | ukify_cmd = d.getVar('UKIFY_CMD') | ||
| 115 | |||
| 116 | deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE') | ||
| 117 | |||
| 118 | # architecture | ||
| 119 | target_arch = d.getVar('EFI_ARCH') | ||
| 120 | if target_arch: | ||
| 121 | ukify_cmd += " --efi-arch %s" % (target_arch) | ||
| 122 | |||
| 123 | # systemd stubs | ||
| 124 | stub = "%s/linux%s.efi.stub" % (d.getVar('DEPLOY_DIR_IMAGE'), target_arch) | ||
| 125 | if not os.path.exists(stub): | ||
| 126 | bb.fatal(f"ERROR: cannot find {stub}.") | ||
| 127 | ukify_cmd += " --stub %s" % (stub) | ||
| 128 | |||
| 129 | # initrd | ||
| 130 | initramfs_image = d.getVar('INITRD_ARCHIVE') | ||
| 131 | ukify_cmd += " --initrd=%s" % (os.path.join(deploy_dir_image, initramfs_image)) | ||
| 132 | |||
| 133 | # kernel | ||
| 134 | kernel_filename = d.getVar('UKI_KERNEL_FILENAME') or None | ||
| 135 | if kernel_filename: | ||
| 136 | kernel = "%s/%s" % (deploy_dir_image, kernel_filename) | ||
| 137 | if not os.path.exists(kernel): | ||
| 138 | bb.fatal(f"ERROR: cannot find %s" % (kernel)) | ||
| 139 | ukify_cmd += " --linux=%s" % (kernel) | ||
| 140 | # not always needed, ukify can detect version from kernel binary | ||
| 141 | kernel_version = d.getVar('KERNEL_VERSION') | ||
| 142 | if kernel_version: | ||
| 143 | ukify_cmd += "--uname %s" % (kernel_version) | ||
| 144 | else: | ||
| 145 | bb.fatal("ERROR - UKI_KERNEL_FILENAME not set") | ||
| 146 | |||
| 147 | # command line | ||
| 148 | cmdline = d.getVar('UKI_CMDLINE') | ||
| 149 | if cmdline: | ||
| 150 | ukify_cmd += " --cmdline='%s'" % (cmdline) | ||
| 151 | |||
| 152 | # dtb | ||
| 153 | if d.getVar('KERNEL_DEVICETREE'): | ||
| 154 | for dtb in d.getVar('KERNEL_DEVICETREE').split(): | ||
| 155 | dtb_path = "%s/%s" % (deploy_dir_image, dtb) | ||
| 156 | if not os.path.exists(dtb_path): | ||
| 157 | bb.fatal(f"ERROR: cannot find {dtb_path}.") | ||
| 158 | ukify_cmd += " --devicetree %s" % (dtb_path) | ||
| 159 | |||
| 160 | # custom config for ukify | ||
| 161 | if os.path.exists(d.getVar('UKI_CONFIG_FILE')): | ||
| 162 | ukify_cmd += " --config=%s" % (d.getVar('UKI_CONFIG_FILE')) | ||
| 163 | |||
| 164 | # systemd tools | ||
| 165 | ukify_cmd += " --tools=%s%s/lib/systemd/tools" % \ | ||
| 166 | (d.getVar("RECIPE_SYSROOT_NATIVE"), d.getVar("prefix")) | ||
| 167 | |||
| 168 | # version | ||
| 169 | ukify_cmd += " --os-release=@%s%s/lib/os-release" % \ | ||
| 170 | (d.getVar("RECIPE_SYSROOT"), d.getVar("prefix")) | ||
| 171 | |||
| 172 | # TODO: tpm2 measure for secure boot, depends on systemd-native and TPM tooling | ||
| 173 | # needed in systemd > 254 to fulfill ConditionSecurity=measured-uki | ||
| 174 | # Requires TPM device on build host, thus not supported at build time. | ||
| 175 | #ukify_cmd += " --measure" | ||
| 176 | |||
| 177 | # secure boot signing, also for the kernel | ||
| 178 | key = d.getVar('UKI_SB_KEY') | ||
| 179 | if key: | ||
| 180 | ukify_cmd += " --sign-kernel --secureboot-private-key='%s'" % (key) | ||
| 181 | cert = d.getVar('UKI_SB_CERT') | ||
| 182 | if cert: | ||
| 183 | ukify_cmd += " --secureboot-certificate='%s'" % (cert) | ||
| 184 | |||
| 185 | # custom output UKI filename | ||
| 186 | output = " --output=%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('UKI_FILENAME')) | ||
| 187 | ukify_cmd += " %s" % (output) | ||
| 188 | |||
| 189 | # Run the ukify command | ||
| 190 | bb.debug(2, "uki: running command: %s" % (ukify_cmd)) | ||
| 191 | out, err = bb.process.run(ukify_cmd, shell=True) | ||
| 192 | bb.debug(2, "%s\n%s" % (out, err)) | ||
| 193 | } | ||
| 194 | addtask uki after do_rootfs before do_deploy do_image_complete do_image_wic | ||
diff --git a/meta/classes-recipe/update-alternatives.bbclass b/meta/classes-recipe/update-alternatives.bbclass deleted file mode 100644 index 5f40dc23ea..0000000000 --- a/meta/classes-recipe/update-alternatives.bbclass +++ /dev/null | |||
| @@ -1,319 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class is used to help the alternatives system, which is useful when | ||
| 8 | # multiple sources provide the same command. You can use the update-alternatives | ||
| 9 | # command directly in your recipe, but in most cases this class simplifies | ||
| 10 | # that job. | ||
| 11 | # | ||
| 12 | # To use this class a number of variables should be defined: | ||
| 13 | # | ||
| 14 | # List all of the alternatives needed by a package: | ||
| 15 | # ALTERNATIVE:<pkg> = "name1 name2 name3 ..." | ||
| 16 | # | ||
| 17 | # i.e. ALTERNATIVE:busybox = "sh sed test bracket" | ||
| 18 | # | ||
| 19 | # The pathname of the link | ||
| 20 | # ALTERNATIVE_LINK_NAME[name] = "target" | ||
| 21 | # | ||
| 22 | # This is the name of the binary once it has been installed on the running system. | ||
| 23 | # This name is global to all split packages in this recipe, and should match | ||
| 24 | # other recipes with the same functionality. | ||
| 25 | # i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/[" | ||
| 26 | # | ||
| 27 | # NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name | ||
| 28 | # | ||
| 29 | # The default link to create for all targets | ||
| 30 | # ALTERNATIVE_TARGET = "target" | ||
| 31 | # | ||
| 32 | # This is useful in a multicall binary case | ||
| 33 | # i.e. ALTERNATIVE_TARGET = "/bin/busybox" | ||
| 34 | # | ||
| 35 | # A non-default link to create for a target | ||
| 36 | # ALTERNATIVE_TARGET[name] = "target" | ||
| 37 | # | ||
| 38 | # This is the name of the binary as it's been installed by do_install | ||
| 39 | # i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash" | ||
| 40 | # | ||
| 41 | # A package specific link for a target | ||
| 42 | # ALTERNATIVE_TARGET_<pkg>[name] = "target" | ||
| 43 | # | ||
| 44 | # This is useful when a recipe provides multiple alternatives for the | ||
| 45 | # same item. | ||
| 46 | # | ||
| 47 | # NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value | ||
| 48 | # from ALTERNATIVE_LINK_NAME. | ||
| 49 | # | ||
| 50 | # NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same, | ||
| 51 | # ALTERNATIVE_TARGET will have '.{BPN}' appended to it. If the file | ||
| 52 | # referenced has not been renamed, it will also be renamed. (This avoids | ||
| 53 | # the need to rename alternative files in the do_install step, but still | ||
| 54 | # supports it if necessary for some reason.) | ||
| 55 | # | ||
| 56 | # The default priority for any alternatives | ||
| 57 | # ALTERNATIVE_PRIORITY = "priority" | ||
| 58 | # | ||
| 59 | # i.e. default is ALTERNATIVE_PRIORITY = "10" | ||
| 60 | # | ||
| 61 | # The non-default priority for a specific target | ||
| 62 | # ALTERNATIVE_PRIORITY[name] = "priority" | ||
| 63 | # | ||
| 64 | # The package priority for a specific target | ||
| 65 | # ALTERNATIVE_PRIORITY_<pkg>[name] = "priority" | ||
| 66 | |||
| 67 | ALTERNATIVE_PRIORITY = "10" | ||
| 68 | |||
| 69 | # We need special processing for vardeps because it cannot work on | ||
| 70 | # modified flag values. So we aggregate the flags into a new variable | ||
| 71 | # and include that variable in the set. | ||
| 72 | UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY" | ||
| 73 | |||
| 74 | PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native" | ||
| 75 | |||
| 76 | def ua_extend_depends(d): | ||
| 77 | if not 'virtual/update-alternatives' in d.getVar('PROVIDES'): | ||
| 78 | d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives') | ||
| 79 | |||
| 80 | def update_alternatives_enabled(d): | ||
| 81 | # Update Alternatives only works on target packages... | ||
| 82 | if bb.data.inherits_class('native', d) or \ | ||
| 83 | bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \ | ||
| 84 | bb.data.inherits_class('cross-canadian', d): | ||
| 85 | return False | ||
| 86 | |||
| 87 | # Disable when targeting mingw32 (no target support) | ||
| 88 | if d.getVar("TARGET_OS") == "mingw32": | ||
| 89 | return False | ||
| 90 | |||
| 91 | return True | ||
| 92 | |||
| 93 | python __anonymous() { | ||
| 94 | if not update_alternatives_enabled(d): | ||
| 95 | return | ||
| 96 | |||
| 97 | # extend the depends to include virtual/update-alternatives | ||
| 98 | ua_extend_depends(d) | ||
| 99 | } | ||
| 100 | |||
| 101 | def gen_updatealternativesvars(d): | ||
| 102 | ret = [] | ||
| 103 | pkgs = (d.getVar("PACKAGES") or "").split() | ||
| 104 | vars = (d.getVar("UPDALTVARS") or "").split() | ||
| 105 | |||
| 106 | # First compute them for non_pkg versions | ||
| 107 | for v in vars: | ||
| 108 | for flag in sorted((d.getVarFlags(v) or {}).keys()): | ||
| 109 | if flag == "doc" or flag == "vardeps" or flag == "vardepsexp": | ||
| 110 | continue | ||
| 111 | ret.append(v + "[" + flag + "]") | ||
| 112 | |||
| 113 | for p in pkgs: | ||
| 114 | for v in vars: | ||
| 115 | for flag in sorted((d.getVarFlags("%s:%s" % (v,p)) or {}).keys()): | ||
| 116 | if flag == "doc" or flag == "vardeps" or flag == "vardepsexp": | ||
| 117 | continue | ||
| 118 | ret.append('%s:%s' % (v,p) + "[" + flag + "]") | ||
| 119 | |||
| 120 | return " ".join(ret) | ||
| 121 | |||
| 122 | # Now the new stuff; we use a custom function to generate the right values | ||
| 123 | populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}" | ||
| 124 | |||
| 125 | # We need to do the rename after the image creation step, but before | ||
| 126 | # the split and strip steps. PACKAGE_PREPROCESS_FUNCS is the right | ||
| 127 | # place for that. | ||
| 128 | PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames" | ||
| 129 | python apply_update_alternative_renames () { | ||
| 130 | if not update_alternatives_enabled(d): | ||
| 131 | return | ||
| 132 | |||
| 133 | import re | ||
| 134 | |||
| 135 | def update_files(alt_target, alt_target_rename, pkg, d): | ||
| 136 | f = d.getVar('FILES:' + pkg) | ||
| 137 | if f: | ||
| 138 | f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f) | ||
| 139 | d.setVar('FILES:' + pkg, f) | ||
| 140 | |||
| 141 | # Check for deprecated usage... | ||
| 142 | pn = d.getVar('BPN') | ||
| 143 | if d.getVar('ALTERNATIVE_LINKS') != None: | ||
| 144 | bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn) | ||
| 145 | |||
| 146 | # Do actual update alternatives processing | ||
| 147 | pkgdest = d.getVar('PKGD') | ||
| 148 | for pkg in (d.getVar('PACKAGES') or "").split(): | ||
| 149 | # If the src == dest, we know we need to rename the dest by appending ${BPN} | ||
| 150 | link_rename = [] | ||
| 151 | for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split(): | ||
| 152 | alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name) | ||
| 153 | if not alt_link: | ||
| 154 | alt_link = "%s/%s" % (d.getVar('bindir'), alt_name) | ||
| 155 | d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link) | ||
| 156 | if alt_link.startswith(os.path.join(d.getVar('sysconfdir'), 'init.d')): | ||
| 157 | # Managing init scripts does not work (bug #10433), foremost | ||
| 158 | # because of a race with update-rc.d | ||
| 159 | bb.fatal("Using update-alternatives for managing SysV init scripts is not supported") | ||
| 160 | |||
| 161 | alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name) | ||
| 162 | alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link | ||
| 163 | # Sometimes alt_target is specified as relative to the link name. | ||
| 164 | alt_target = os.path.join(os.path.dirname(alt_link), alt_target) | ||
| 165 | |||
| 166 | # If the link and target are the same name, we need to rename the target. | ||
| 167 | if alt_link == alt_target: | ||
| 168 | src = '%s/%s' % (pkgdest, alt_target) | ||
| 169 | alt_target_rename = '%s.%s' % (alt_target, pn) | ||
| 170 | dest = '%s/%s' % (pkgdest, alt_target_rename) | ||
| 171 | if os.path.lexists(dest): | ||
| 172 | bb.note('%s: Already renamed: %s' % (pn, alt_target_rename)) | ||
| 173 | elif os.path.lexists(src): | ||
| 174 | if os.path.islink(src): | ||
| 175 | # Delay rename of links | ||
| 176 | link_rename.append((alt_target, alt_target_rename)) | ||
| 177 | else: | ||
| 178 | bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename)) | ||
| 179 | bb.utils.rename(src, dest) | ||
| 180 | update_files(alt_target, alt_target_rename, pkg, d) | ||
| 181 | else: | ||
| 182 | bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename)) | ||
| 183 | continue | ||
| 184 | d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename) | ||
| 185 | |||
| 186 | # Process delayed link names | ||
| 187 | # Do these after other renames so we can correct broken links | ||
| 188 | for (alt_target, alt_target_rename) in link_rename: | ||
| 189 | src = '%s/%s' % (pkgdest, alt_target) | ||
| 190 | dest = '%s/%s' % (pkgdest, alt_target_rename) | ||
| 191 | link_target = oe.path.realpath(src, pkgdest, True) | ||
| 192 | |||
| 193 | if os.path.lexists(link_target): | ||
| 194 | # Ok, the link_target exists, we can rename | ||
| 195 | bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename)) | ||
| 196 | bb.utils.rename(src, dest) | ||
| 197 | else: | ||
| 198 | # Try to resolve the broken link to link.${BPN} | ||
| 199 | link_maybe = '%s.%s' % (os.readlink(src), pn) | ||
| 200 | if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)): | ||
| 201 | # Ok, the renamed link target exists.. create a new link, and remove the original | ||
| 202 | bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe)) | ||
| 203 | os.symlink(link_maybe, dest) | ||
| 204 | os.unlink(src) | ||
| 205 | else: | ||
| 206 | bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target)) | ||
| 207 | continue | ||
| 208 | update_files(alt_target, alt_target_rename, pkg, d) | ||
| 209 | } | ||
| 210 | |||
| 211 | def update_alternatives_alt_targets(d, pkg): | ||
| 212 | """ | ||
| 213 | Returns the update-alternatives metadata for a package. | ||
| 214 | |||
| 215 | The returned format is a list of tuples where the tuple contains: | ||
| 216 | alt_name: The binary name | ||
| 217 | alt_link: The path for the binary (Shared by different packages) | ||
| 218 | alt_target: The path for the renamed binary (Unique per package) | ||
| 219 | alt_priority: The priority of the alt_target | ||
| 220 | |||
| 221 | All the alt_targets will be installed into the sysroot. The alt_link is | ||
| 222 | a symlink pointing to the alt_target with the highest priority. | ||
| 223 | """ | ||
| 224 | |||
| 225 | pn = d.getVar('BPN') | ||
| 226 | pkgdest = d.getVar('PKGD') | ||
| 227 | updates = list() | ||
| 228 | for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split(): | ||
| 229 | alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name) | ||
| 230 | alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \ | ||
| 231 | d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \ | ||
| 232 | d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \ | ||
| 233 | d.getVar('ALTERNATIVE_TARGET') or \ | ||
| 234 | alt_link | ||
| 235 | alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \ | ||
| 236 | d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \ | ||
| 237 | d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \ | ||
| 238 | d.getVar('ALTERNATIVE_PRIORITY') | ||
| 239 | |||
| 240 | # This shouldn't trigger, as it should have been resolved earlier! | ||
| 241 | if alt_link == alt_target: | ||
| 242 | bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target)) | ||
| 243 | alt_target = '%s.%s' % (alt_target, pn) | ||
| 244 | |||
| 245 | if not os.path.lexists('%s/%s' % (pkgdest, alt_target)): | ||
| 246 | bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target)) | ||
| 247 | continue | ||
| 248 | |||
| 249 | alt_target = os.path.normpath(alt_target) | ||
| 250 | updates.append( (alt_name, alt_link, alt_target, alt_priority) ) | ||
| 251 | |||
| 252 | return updates | ||
| 253 | |||
| 254 | PACKAGESPLITFUNCS =+ "populate_packages_updatealternatives" | ||
| 255 | |||
| 256 | python populate_packages_updatealternatives () { | ||
| 257 | if not update_alternatives_enabled(d): | ||
| 258 | return | ||
| 259 | |||
| 260 | # Do actual update alternatives processing | ||
| 261 | for pkg in (d.getVar('PACKAGES') or "").split(): | ||
| 262 | # Create post install/removal scripts | ||
| 263 | alt_setup_links = "" | ||
| 264 | alt_remove_links = "" | ||
| 265 | updates = update_alternatives_alt_targets(d, pkg) | ||
| 266 | for alt_name, alt_link, alt_target, alt_priority in updates: | ||
| 267 | alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority) | ||
| 268 | alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target) | ||
| 269 | |||
| 270 | if alt_setup_links: | ||
| 271 | # RDEPENDS setup | ||
| 272 | provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives') | ||
| 273 | if provider: | ||
| 274 | #bb.note('adding runtime requirement for update-alternatives for %s' % pkg) | ||
| 275 | d.appendVar('RDEPENDS:%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider) | ||
| 276 | |||
| 277 | bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg) | ||
| 278 | bb.note('%s' % alt_setup_links) | ||
| 279 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 280 | if postinst: | ||
| 281 | postinst = alt_setup_links + postinst | ||
| 282 | else: | ||
| 283 | postinst = '#!/bin/sh\n' + alt_setup_links | ||
| 284 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 285 | |||
| 286 | bb.note('%s' % alt_remove_links) | ||
| 287 | prerm = d.getVar('pkg_prerm:%s' % pkg) or '#!/bin/sh\n' | ||
| 288 | prerm += alt_remove_links | ||
| 289 | d.setVar('pkg_prerm:%s' % pkg, prerm) | ||
| 290 | } | ||
| 291 | |||
| 292 | python package_do_filedeps:append () { | ||
| 293 | if update_alternatives_enabled(d): | ||
| 294 | apply_update_alternative_provides(d) | ||
| 295 | } | ||
| 296 | |||
| 297 | def apply_update_alternative_provides(d): | ||
| 298 | pn = d.getVar('BPN') | ||
| 299 | pkgdest = d.getVar('PKGDEST') | ||
| 300 | |||
| 301 | for pkg in d.getVar('PACKAGES').split(): | ||
| 302 | for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split(): | ||
| 303 | alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name) | ||
| 304 | alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name) | ||
| 305 | alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link | ||
| 306 | |||
| 307 | if alt_link == alt_target: | ||
| 308 | bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target)) | ||
| 309 | alt_target = '%s.%s' % (alt_target, pn) | ||
| 310 | |||
| 311 | if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)): | ||
| 312 | continue | ||
| 313 | |||
| 314 | # Add file provide | ||
| 315 | trans_target = oe.package.file_translate(alt_target) | ||
| 316 | d.appendVar('FILERPROVIDES:%s:%s' % (trans_target, pkg), " " + alt_link) | ||
| 317 | if trans_target not in (d.getVar('FILERPROVIDESFLIST:%s' % pkg) or ""): | ||
| 318 | d.appendVar('FILERPROVIDESFLIST:%s' % pkg, " " + trans_target) | ||
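A minimal usage sketch for the class above (hypothetical recipe values; the variable names and flags are exactly the ones the code reads):

    # Illustrative recipe fragment, not from this diff: "vi" and the busybox
    # target are assumed example values.
    inherit update-alternatives

    ALTERNATIVE:${PN} = "vi"
    ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
    ALTERNATIVE_TARGET[vi] = "${base_bindir}/busybox"
    ALTERNATIVE_PRIORITY[vi] = "50"

If ALTERNATIVE_LINK_NAME[vi] were left unset, the handler above would default the link to ${bindir}/vi, and since link and target differ here no ${BPN} rename is triggered.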
| 319 | |||
diff --git a/meta/classes-recipe/update-rc.d.bbclass b/meta/classes-recipe/update-rc.d.bbclass deleted file mode 100644 index a19e704741..0000000000 --- a/meta/classes-recipe/update-rc.d.bbclass +++ /dev/null | |||
| @@ -1,129 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | UPDATERCPN ?= "${PN}" | ||
| 8 | |||
| 9 | DEPENDS:append:class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}" | ||
| 10 | |||
| 11 | UPDATERCD = "update-rc.d" | ||
| 12 | UPDATERCD:class-cross = "" | ||
| 13 | UPDATERCD:class-native = "" | ||
| 14 | UPDATERCD:class-nativesdk = "" | ||
| 15 | |||
| 16 | INITSCRIPT_PARAMS ?= "defaults" | ||
| 17 | |||
| 18 | INIT_D_DIR = "${sysconfdir}/init.d" | ||
| 19 | |||
| 20 | def use_updatercd(d): | ||
| 21 | # If the distro supports both sysvinit and systemd, and the current recipe | ||
| 22 | # supports systemd, only call update-rc.d on rootfs creation or if systemd | ||
| 23 | # is not running. That's because systemctl enable/disable will already call | ||
| 24 | # update-rc.d if it detects initscripts. | ||
| 25 | if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d): | ||
| 26 | return '[ -n "$D" -o ! -d /run/systemd/system ]' | ||
| 27 | return 'true' | ||
| 28 | |||
| 29 | PACKAGE_WRITE_DEPS += "update-rc.d-native" | ||
| 30 | |||
| 31 | updatercd_postinst() { | ||
| 32 | if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then | ||
| 33 | if [ -n "$D" ]; then | ||
| 34 | OPT="-r $D" | ||
| 35 | else | ||
| 36 | OPT="-s" | ||
| 37 | fi | ||
| 38 | update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS} | ||
| 39 | fi | ||
| 40 | } | ||
| 41 | |||
| 42 | updatercd_prerm() { | ||
| 43 | if ${@use_updatercd(d)} && [ -z "$D" ] && [ -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then | ||
| 44 | ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || : | ||
| 45 | fi | ||
| 46 | } | ||
| 47 | |||
| 48 | updatercd_postrm() { | ||
| 49 | if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then | ||
| 50 | if [ -n "$D" ]; then | ||
| 51 | OPT="-f -r $D" | ||
| 52 | else | ||
| 53 | OPT="-f" | ||
| 54 | fi | ||
| 55 | update-rc.d $OPT ${INITSCRIPT_NAME} remove | ||
| 56 | fi | ||
| 57 | } | ||
| 58 | |||
| 59 | |||
| 60 | def update_rc_after_parse(d): | ||
| 61 | if d.getVar('INITSCRIPT_PACKAGES', False) is None: | ||
| 62 | if d.getVar('INITSCRIPT_NAME', False) is None: | ||
| 63 | bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False)) | ||
| 64 | if d.getVar('INITSCRIPT_PARAMS', False) is None: | ||
| 65 | bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False)) | ||
| 66 | |||
| 67 | python __anonymous() { | ||
| 68 | update_rc_after_parse(d) | ||
| 69 | } | ||
| 70 | |||
| 71 | PACKAGESPLITFUNCS =+ "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd', '', d)}" | ||
| 72 | PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd" | ||
| 73 | |||
| 74 | populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst" | ||
| 75 | populate_packages_updatercd[vardepsexclude] += "OVERRIDES" | ||
| 76 | |||
| 77 | python populate_packages_updatercd () { | ||
| 78 | def update_rcd_auto_depend(pkg): | ||
| 79 | import subprocess | ||
| 80 | import os | ||
| 81 | path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}") | ||
| 82 | if not os.path.exists(path): | ||
| 83 | return | ||
| 84 | statement = "grep -q -w '/etc/init.d/functions' %s" % path | ||
| 85 | if subprocess.call(statement, shell=True) == 0: | ||
| 86 | mlprefix = d.getVar('MLPREFIX') or "" | ||
| 87 | d.appendVar('RDEPENDS:' + pkg, ' %sinitd-functions' % (mlprefix)) | ||
| 88 | |||
| 89 | def update_rcd_package(pkg): | ||
| 90 | bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg) | ||
| 91 | |||
| 92 | localdata = bb.data.createCopy(d) | ||
| 93 | overrides = localdata.getVar("OVERRIDES") | ||
| 94 | localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides)) | ||
| 95 | |||
| 96 | update_rcd_auto_depend(pkg) | ||
| 97 | |||
| 98 | postinst = d.getVar('pkg_postinst:%s' % pkg) | ||
| 99 | if not postinst: | ||
| 100 | postinst = '#!/bin/sh\n' | ||
| 101 | postinst += localdata.getVar('updatercd_postinst') | ||
| 102 | d.setVar('pkg_postinst:%s' % pkg, postinst) | ||
| 103 | |||
| 104 | prerm = d.getVar('pkg_prerm:%s' % pkg) | ||
| 105 | if not prerm: | ||
| 106 | prerm = '#!/bin/sh\n' | ||
| 107 | prerm += localdata.getVar('updatercd_prerm') | ||
| 108 | d.setVar('pkg_prerm:%s' % pkg, prerm) | ||
| 109 | |||
| 110 | postrm = d.getVar('pkg_postrm:%s' % pkg) | ||
| 111 | if not postrm: | ||
| 112 | postrm = '#!/bin/sh\n' | ||
| 113 | postrm += localdata.getVar('updatercd_postrm') | ||
| 114 | d.setVar('pkg_postrm:%s' % pkg, postrm) | ||
| 115 | |||
| 116 | d.appendVar('RRECOMMENDS:' + pkg, " ${MLPREFIX}${UPDATERCD}") | ||
| 117 | |||
| 118 | # Check that this class isn't being inhibited (generally, by | ||
| 119 | # systemd.bbclass) before doing any work. | ||
| 120 | if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"): | ||
| 121 | pkgs = d.getVar('INITSCRIPT_PACKAGES') | ||
| 122 | if pkgs is None: | ||
| 123 | pkgs = d.getVar('UPDATERCPN') | ||
| 124 | packages = (d.getVar('PACKAGES') or "").split() | ||
| 125 | if pkgs not in packages and packages != []: | ||
| 126 | pkgs = packages[0] | ||
| 127 | for pkg in pkgs.split(): | ||
| 128 | update_rcd_package(pkg) | ||
| 129 | } | ||
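A minimal usage sketch for update-rc.d.bbclass (hypothetical service name and runlevels; INITSCRIPT_NAME and INITSCRIPT_PARAMS are the values update_rc_after_parse insists on):

    # Illustrative recipe fragment, not from this diff.
    inherit update-rc.d

    INITSCRIPT_NAME = "myservice"
    INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ."

For several packages, INITSCRIPT_PACKAGES lists them and per-package values are supplied through overrides (e.g. INITSCRIPT_NAME:${PN}-daemon), which populate_packages_updatercd resolves via its OVERRIDES copy.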
diff --git a/meta/classes-recipe/upstream-version-is-even.bbclass b/meta/classes-recipe/upstream-version-is-even.bbclass deleted file mode 100644 index 19587cb12c..0000000000 --- a/meta/classes-recipe/upstream-version-is-even.bbclass +++ /dev/null | |||
| @@ -1,11 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # This class ensures that the upstream version check only | ||
| 8 | # accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x, etc.) | ||
| 9 | # This scheme is used by GNOME and a number of other projects | ||
| 10 | # to signify stable releases vs development releases. | ||
| 11 | UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar" | ||
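Usage is a one-liner; the sketch below illustrates the effect of the regex (version values are illustrative examples, not from this diff):

    # Illustrative recipe fragment. With the regex above, stable versions
    # such as 3.2.4 or 3.40.1 pass the upstream version check, while
    # odd-minor development versions such as 3.3.0 or 3.41.0 are skipped.
    inherit upstream-version-is-even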
diff --git a/meta/classes-recipe/vala.bbclass b/meta/classes-recipe/vala.bbclass deleted file mode 100644 index 460ddb36f0..0000000000 --- a/meta/classes-recipe/vala.bbclass +++ /dev/null | |||
| @@ -1,30 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Everyone needs vala-native and targets need vala, too, | ||
| 8 | # because that is where target builds look for .vapi files. | ||
| 9 | # | ||
| 10 | VALADEPENDS = "" | ||
| 11 | VALADEPENDS:class-target = "vala" | ||
| 12 | DEPENDS:append = " vala-native ${VALADEPENDS}" | ||
| 13 | |||
| 14 | # Our patched version of Vala looks in STAGING_DATADIR for .vapi files | ||
| 15 | export STAGING_DATADIR | ||
| 16 | # Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files | ||
| 17 | export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}" | ||
| 18 | |||
| 19 | # Package additional files | ||
| 20 | FILES:${PN}-dev += "\ | ||
| 21 | ${datadir}/vala/vapi/*.vapi \ | ||
| 22 | ${datadir}/vala/vapi/*.deps \ | ||
| 23 | ${datadir}/gir-1.0 \ | ||
| 24 | " | ||
| 25 | |||
| 26 | # Remove vapigen.m4 that is bundled with tarballs | ||
| 27 | # because it does not yet have our cross-compile fixes | ||
| 28 | do_configure:prepend() { | ||
| 29 | rm -f ${S}/m4/vapigen.m4 | ||
| 30 | } | ||
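A minimal usage sketch for vala.bbclass (the meson pairing is an assumed example; any supported build system class combines the same way):

    # Illustrative recipe fragment, not from this diff. Inheriting vala adds
    # vala-native (plus vala for target builds) to DEPENDS and exports the
    # .vapi search paths set above.
    inherit meson vala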
diff --git a/meta/classes-recipe/waf.bbclass b/meta/classes-recipe/waf.bbclass deleted file mode 100644 index 01707c8e2c..0000000000 --- a/meta/classes-recipe/waf.bbclass +++ /dev/null | |||
| @@ -1,94 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # avoids build breaks when using no-static-libs.inc | ||
| 8 | DISABLE_STATIC = "" | ||
| 9 | |||
| 10 | # What Python interpreter to use. Defaults to Python 3 but can be | ||
| 11 | # overridden if required. | ||
| 12 | WAF_PYTHON ?= "python3" | ||
| 13 | |||
| 14 | B = "${WORKDIR}/build" | ||
| 15 | do_configure[cleandirs] += "${B}" | ||
| 16 | |||
| 17 | EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}" | ||
| 18 | |||
| 19 | EXTRA_OEWAF_BUILD ??= "" | ||
| 20 | # In most cases, you want to pass the same arguments to `waf build` and `waf | ||
| 21 | # install`, but you can override it if necessary | ||
| 22 | EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}" | ||
| 23 | |||
| 24 | def waflock_hash(d): | ||
| 25 | # Calculates the hash used for the waf lock file. This should include | ||
| 26 | # all of the user controllable inputs passed to waf configure. Note | ||
| 27 | # that the full paths for ${B} and ${S} are used; this is OK and desired | ||
| 28 | # because a change to either of these should create a unique lock file | ||
| 29 | # to prevent collisions. | ||
| 30 | import hashlib | ||
| 31 | h = hashlib.sha512() | ||
| 32 | def update(name): | ||
| 33 | val = d.getVar(name) | ||
| 34 | if val is not None: | ||
| 35 | h.update(val.encode('utf-8')) | ||
| 36 | update('S') | ||
| 37 | update('B') | ||
| 38 | update('prefix') | ||
| 39 | update('EXTRA_OECONF') | ||
| 40 | return h.hexdigest() | ||
| 41 | |||
| 42 | # Use WAFLOCK to specify a separate lock file. The build is already | ||
| 43 | # sufficiently isolated by setting the output directory; this ensures that | ||
| 44 | # bitbake won't step on the toes of any other configured context in the source | ||
| 45 | # directory (e.g. if the source is coming from externalsrc and was previously | ||
| 46 | # configured elsewhere). | ||
| 47 | export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build" | ||
| 48 | BB_BASEHASH_IGNORE_VARS += "WAFLOCK" | ||
| 49 | |||
| 50 | python waf_preconfigure() { | ||
| 51 | import subprocess | ||
| 52 | subsrcdir = d.getVar('S') | ||
| 53 | python = d.getVar('WAF_PYTHON') | ||
| 54 | wafbin = os.path.join(subsrcdir, 'waf') | ||
| 55 | try: | ||
| 56 | result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT) | ||
| 57 | # Output looks like: | ||
| 58 | # # output from lower modules (e.g. warnings, ...) | ||
| 59 | # waf X.Y.Z ... | ||
| 60 | # So, look for the line starting with "waf " | ||
| 61 | version = None | ||
| 62 | for line in result.decode('utf-8').split("\n"): | ||
| 63 | if line.startswith("waf "): | ||
| 64 | version = line.split()[1] | ||
| 65 | break | ||
| 66 | |||
| 67 | if not version or not bb.utils.is_semver(version): | ||
| 68 | bb.warn("Unable to parse \"waf --version\" output. Assuming waf version without bindir/libdir support.") | ||
| 69 | bb.warn("waf·--version·output = \n%s" % result.decode('utf-8')) | ||
| 70 | elif bb.utils.vercmp_string_op(version, "1.8.7", ">="): | ||
| 71 | bb.note("waf version is high enough to add --bindir and --libdir") | ||
| 72 | d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}") | ||
| 73 | except subprocess.CalledProcessError as e: | ||
| 74 | bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode) | ||
| 75 | except FileNotFoundError: | ||
| 76 | bb.fatal("waf does not exist in %s" % subsrcdir) | ||
| 77 | } | ||
| 78 | |||
| 79 | do_configure[prefuncs] += "waf_preconfigure" | ||
| 80 | |||
| 81 | waf_do_configure() { | ||
| 82 | (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF}) | ||
| 83 | } | ||
| 84 | |||
| 85 | do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+" | ||
| 86 | waf_do_compile() { | ||
| 87 | (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD}) | ||
| 88 | } | ||
| 89 | |||
| 90 | waf_do_install() { | ||
| 91 | (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL}) | ||
| 92 | } | ||
| 93 | |||
| 94 | EXPORT_FUNCTIONS do_configure do_compile do_install | ||
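A minimal usage sketch for waf.bbclass (the configure/build flags are assumed examples):

    # Illustrative recipe fragment, not from this diff. EXTRA_OECONF feeds
    # "waf configure"; EXTRA_OEWAF_BUILD feeds "waf build" and, unless
    # overridden, "waf install" too.
    inherit waf

    EXTRA_OECONF = "--disable-docs"
    EXTRA_OEWAF_BUILD = "-v"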
diff --git a/meta/classes-recipe/xmlcatalog.bbclass b/meta/classes-recipe/xmlcatalog.bbclass deleted file mode 100644 index d3ef7ff43c..0000000000 --- a/meta/classes-recipe/xmlcatalog.bbclass +++ /dev/null | |||
| @@ -1,36 +0,0 @@ | |||
| 1 | # | ||
| 2 | # Copyright OpenEmbedded Contributors | ||
| 3 | # | ||
| 4 | # SPDX-License-Identifier: MIT | ||
| 5 | # | ||
| 6 | |||
| 7 | # Note that this class only handles XML catalogues in the native sysroot, and doesn't | ||
| 8 | # yet support catalogue management in the target sysroot or on the target itself. | ||
| 9 | # (https://bugzilla.yoctoproject.org/13271) | ||
| 10 | |||
| 11 | # A whitespace-separated list of XML catalogs to be registered, for example | ||
| 12 | # "${sysconfdir}/xml/docbook-xml.xml". | ||
| 13 | XMLCATALOGS ?= "" | ||
| 14 | |||
| 15 | DEPENDS:append = " libxml2-native" | ||
| 16 | |||
| 17 | SYSROOT_PREPROCESS_FUNCS:append:class-native = " xmlcatalog_sstate_postinst" | ||
| 18 | |||
| 19 | xmlcatalog_complete() { | ||
| 20 | ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog" | ||
| 21 | if [ ! -f "$ROOTCATALOG" ]; then | ||
| 22 | mkdir --parents "$(dirname "$ROOTCATALOG")" | ||
| 23 | xmlcatalog --noout --create "$ROOTCATALOG" | ||
| 24 | fi | ||
| 25 | for CATALOG in ${XMLCATALOGS}; do | ||
| 26 | xmlcatalog --noout --add nextCatalog unused file://$CATALOG "$ROOTCATALOG" | ||
| 27 | done | ||
| 28 | } | ||
| 29 | |||
| 30 | xmlcatalog_sstate_postinst() { | ||
| 31 | mkdir -p ${SYSROOT_DESTDIR}${bindir} | ||
| 32 | dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog | ||
| 33 | echo '#!/bin/sh' > $dest | ||
| 34 | echo '${xmlcatalog_complete}' >> $dest | ||
| 35 | chmod 0755 $dest | ||
| 36 | } | ||
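A minimal usage sketch for xmlcatalog.bbclass (the catalog path reuses the example from the comment above; registration takes effect for class-native builds, where xmlcatalog_sstate_postinst generates the sysroot postinst):

    # Illustrative recipe fragment, not from this diff.
    inherit xmlcatalog

    XMLCATALOGS = "${sysconfdir}/xml/docbook-xml.xml"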
