From 972dcfcdbfe75dcfeb777150c136576cf1a71e99 Mon Sep 17 00:00:00 2001 From: Tudor Florea Date: Fri, 9 Oct 2015 22:59:03 +0200 Subject: initial commit for Enea Linux 5.0 arm Signed-off-by: Tudor Florea --- meta/classes/allarch.bbclass | 43 + meta/classes/archiver.bbclass | 380 +++++ meta/classes/autotools-brokensep.bbclass | 5 + meta/classes/autotools.bbclass | 302 ++++ meta/classes/autotools_stage.bbclass | 2 + meta/classes/base.bbclass | 566 ++++++++ meta/classes/bin_package.bbclass | 36 + meta/classes/binconfig-disabled.bbclass | 15 + meta/classes/binconfig.bbclass | 63 + meta/classes/blacklist.bbclass | 45 + meta/classes/boot-directdisk.bbclass | 191 +++ meta/classes/bootimg.bbclass | 267 ++++ meta/classes/bugzilla.bbclass | 187 +++ meta/classes/buildhistory.bbclass | 696 +++++++++ meta/classes/buildstats-summary.bbclass | 39 + meta/classes/buildstats.bbclass | 289 ++++ meta/classes/ccache.bbclass | 8 + meta/classes/chrpath.bbclass | 115 ++ meta/classes/clutter.bbclass | 22 + meta/classes/cmake.bbclass | 121 ++ meta/classes/cml1.bbclass | 74 + meta/classes/compress_doc.bbclass | 256 ++++ meta/classes/copyleft_compliance.bbclass | 64 + meta/classes/copyleft_filter.bbclass | 62 + meta/classes/core-image.bbclass | 80 ++ meta/classes/cpan-base.bbclass | 55 + meta/classes/cpan.bbclass | 55 + meta/classes/cpan_build.bbclass | 53 + meta/classes/cross-canadian.bbclass | 142 ++ meta/classes/cross.bbclass | 75 + meta/classes/crosssdk.bbclass | 36 + meta/classes/debian.bbclass | 141 ++ meta/classes/deploy.bbclass | 10 + meta/classes/devshell.bbclass | 154 ++ meta/classes/distro_features_check.bbclass | 28 + meta/classes/distrodata.bbclass | 902 ++++++++++++ meta/classes/distutils-base.bbclass | 4 + meta/classes/distutils-common-base.bbclass | 24 + meta/classes/distutils-native-base.bbclass | 3 + meta/classes/distutils-tools.bbclass | 77 + meta/classes/distutils.bbclass | 80 ++ meta/classes/distutils3-base.bbclass | 8 + meta/classes/distutils3-native-base.bbclass | 4 + meta/classes/distutils3.bbclass | 96 ++ meta/classes/externalsrc.bbclass | 53 + meta/classes/extrausers.bbclass | 65 + meta/classes/fontcache.bbclass | 45 + meta/classes/gconf.bbclass | 70 + meta/classes/gettext.bbclass | 19 + meta/classes/gnome.bbclass | 5 + meta/classes/gnomebase.bbclass | 30 + meta/classes/grub-efi.bbclass | 141 ++ meta/classes/gsettings.bbclass | 37 + meta/classes/gtk-doc.bbclass | 25 + meta/classes/gtk-icon-cache.bbclass | 62 + meta/classes/gtk-immodules-cache.bbclass | 83 ++ meta/classes/gummiboot.bbclass | 114 ++ meta/classes/gzipnative.bbclass | 5 + meta/classes/icecc.bbclass | 332 +++++ meta/classes/image-live.bbclass | 18 + meta/classes/image-mklibs.bbclass | 71 + meta/classes/image-prelink.bbclass | 33 + meta/classes/image-swab.bbclass | 94 ++ meta/classes/image-vmdk.bbclass | 35 + meta/classes/image.bbclass | 448 ++++++ meta/classes/image_types.bbclass | 163 +++ meta/classes/image_types_uboot.bbclass | 23 + meta/classes/insane.bbclass | 1153 +++++++++++++++ meta/classes/insserv.bbclass | 5 + meta/classes/kernel-arch.bbclass | 60 + meta/classes/kernel-grub.bbclass | 91 ++ meta/classes/kernel-module-split.bbclass | 200 +++ meta/classes/kernel-yocto.bbclass | 361 +++++ meta/classes/kernel.bbclass | 505 +++++++ meta/classes/lib_package.bbclass | 7 + meta/classes/libc-common.bbclass | 43 + meta/classes/libc-package.bbclass | 390 +++++ meta/classes/license.bbclass | 397 ++++++ meta/classes/linux-kernel-base.bbclass | 32 + meta/classes/logging.bbclass | 72 + meta/classes/meta.bbclass | 4 + 
meta/classes/metadata_scm.bbclass | 82 ++ meta/classes/migrate_localcount.bbclass | 46 + meta/classes/mime.bbclass | 56 + meta/classes/mirrors.bbclass | 82 ++ meta/classes/module-base.bbclass | 18 + meta/classes/module.bbclass | 32 + meta/classes/multilib.bbclass | 145 ++ meta/classes/multilib_global.bbclass | 158 ++ meta/classes/multilib_header.bbclass | 54 + meta/classes/native.bbclass | 175 +++ meta/classes/nativesdk.bbclass | 95 ++ meta/classes/oelint.bbclass | 85 ++ meta/classes/own-mirrors.bbclass | 13 + meta/classes/package.bbclass | 2060 +++++++++++++++++++++++++++ meta/classes/package_deb.bbclass | 330 +++++ meta/classes/package_ipk.bbclass | 286 ++++ meta/classes/package_rpm.bbclass | 754 ++++++++++ meta/classes/package_tar.bbclass | 69 + meta/classes/packagedata.bbclass | 26 + meta/classes/packagegroup.bbclass | 52 + meta/classes/packageinfo.bbclass | 22 + meta/classes/patch.bbclass | 188 +++ meta/classes/perlnative.bbclass | 3 + meta/classes/pixbufcache.bbclass | 72 + meta/classes/pkgconfig.bbclass | 2 + meta/classes/populate_sdk.bbclass | 7 + meta/classes/populate_sdk_base.bbclass | 164 +++ meta/classes/prexport.bbclass | 58 + meta/classes/primport.bbclass | 21 + meta/classes/prserv.bbclass | 2 + meta/classes/ptest-gnome.bbclass | 8 + meta/classes/ptest.bbclass | 62 + meta/classes/python-dir.bbclass | 5 + meta/classes/python3native.bbclass | 7 + meta/classes/pythonnative.bbclass | 6 + meta/classes/qemu.bbclass | 48 + meta/classes/qmake2.bbclass | 27 + meta/classes/qmake_base.bbclass | 119 ++ meta/classes/qt4e.bbclass | 24 + meta/classes/qt4x11.bbclass | 14 + meta/classes/recipe_sanity.bbclass | 167 +++ meta/classes/relocatable.bbclass | 7 + meta/classes/report-error.bbclass | 70 + meta/classes/rm_work.bbclass | 120 ++ meta/classes/rootfs_deb.bbclass | 39 + meta/classes/rootfs_ipk.bbclass | 39 + meta/classes/rootfs_rpm.bbclass | 47 + meta/classes/sanity.bbclass | 887 ++++++++++++ meta/classes/scons.bbclass | 15 + meta/classes/sdl.bbclass | 6 + meta/classes/setuptools.bbclass | 8 + meta/classes/setuptools3.bbclass | 8 + meta/classes/sip.bbclass | 61 + meta/classes/siteconfig.bbclass | 33 + meta/classes/siteinfo.bbclass | 164 +++ meta/classes/spdx.bbclass | 325 +++++ meta/classes/sstate.bbclass | 837 +++++++++++ meta/classes/staging.bbclass | 122 ++ meta/classes/syslinux.bbclass | 187 +++ meta/classes/systemd.bbclass | 197 +++ meta/classes/terminal.bbclass | 94 ++ meta/classes/testimage-auto.bbclass | 23 + meta/classes/testimage.bbclass | 323 +++++ meta/classes/texinfo.bbclass | 15 + meta/classes/tinderclient.bbclass | 368 +++++ meta/classes/toaster.bbclass | 343 +++++ meta/classes/toolchain-scripts.bbclass | 138 ++ meta/classes/typecheck.bbclass | 12 + meta/classes/uboot-config.bbclass | 61 + meta/classes/uninative.bbclass | 44 + meta/classes/update-alternatives.bbclass | 267 ++++ meta/classes/update-rc.d.bbclass | 135 ++ meta/classes/useradd-staticids.bbclass | 276 ++++ meta/classes/useradd.bbclass | 213 +++ meta/classes/useradd_base.bbclass | 230 +++ meta/classes/utility-tasks.bbclass | 69 + meta/classes/utils.bbclass | 379 +++++ meta/classes/vala.bbclass | 21 + meta/classes/waf.bbclass | 13 + 160 files changed, 23006 insertions(+) create mode 100644 meta/classes/allarch.bbclass create mode 100644 meta/classes/archiver.bbclass create mode 100644 meta/classes/autotools-brokensep.bbclass create mode 100644 meta/classes/autotools.bbclass create mode 100644 meta/classes/autotools_stage.bbclass create mode 100644 meta/classes/base.bbclass create mode 100644 
meta/classes/bin_package.bbclass create mode 100644 meta/classes/binconfig-disabled.bbclass create mode 100644 meta/classes/binconfig.bbclass create mode 100644 meta/classes/blacklist.bbclass create mode 100644 meta/classes/boot-directdisk.bbclass create mode 100644 meta/classes/bootimg.bbclass create mode 100644 meta/classes/bugzilla.bbclass create mode 100644 meta/classes/buildhistory.bbclass create mode 100644 meta/classes/buildstats-summary.bbclass create mode 100644 meta/classes/buildstats.bbclass create mode 100644 meta/classes/ccache.bbclass create mode 100644 meta/classes/chrpath.bbclass create mode 100644 meta/classes/clutter.bbclass create mode 100644 meta/classes/cmake.bbclass create mode 100644 meta/classes/cml1.bbclass create mode 100644 meta/classes/compress_doc.bbclass create mode 100644 meta/classes/copyleft_compliance.bbclass create mode 100644 meta/classes/copyleft_filter.bbclass create mode 100644 meta/classes/core-image.bbclass create mode 100644 meta/classes/cpan-base.bbclass create mode 100644 meta/classes/cpan.bbclass create mode 100644 meta/classes/cpan_build.bbclass create mode 100644 meta/classes/cross-canadian.bbclass create mode 100644 meta/classes/cross.bbclass create mode 100644 meta/classes/crosssdk.bbclass create mode 100644 meta/classes/debian.bbclass create mode 100644 meta/classes/deploy.bbclass create mode 100644 meta/classes/devshell.bbclass create mode 100644 meta/classes/distro_features_check.bbclass create mode 100644 meta/classes/distrodata.bbclass create mode 100644 meta/classes/distutils-base.bbclass create mode 100644 meta/classes/distutils-common-base.bbclass create mode 100644 meta/classes/distutils-native-base.bbclass create mode 100644 meta/classes/distutils-tools.bbclass create mode 100644 meta/classes/distutils.bbclass create mode 100644 meta/classes/distutils3-base.bbclass create mode 100644 meta/classes/distutils3-native-base.bbclass create mode 100644 meta/classes/distutils3.bbclass create mode 100644 meta/classes/externalsrc.bbclass create mode 100644 meta/classes/extrausers.bbclass create mode 100644 meta/classes/fontcache.bbclass create mode 100644 meta/classes/gconf.bbclass create mode 100644 meta/classes/gettext.bbclass create mode 100644 meta/classes/gnome.bbclass create mode 100644 meta/classes/gnomebase.bbclass create mode 100644 meta/classes/grub-efi.bbclass create mode 100644 meta/classes/gsettings.bbclass create mode 100644 meta/classes/gtk-doc.bbclass create mode 100644 meta/classes/gtk-icon-cache.bbclass create mode 100644 meta/classes/gtk-immodules-cache.bbclass create mode 100644 meta/classes/gummiboot.bbclass create mode 100644 meta/classes/gzipnative.bbclass create mode 100644 meta/classes/icecc.bbclass create mode 100644 meta/classes/image-live.bbclass create mode 100644 meta/classes/image-mklibs.bbclass create mode 100644 meta/classes/image-prelink.bbclass create mode 100644 meta/classes/image-swab.bbclass create mode 100644 meta/classes/image-vmdk.bbclass create mode 100644 meta/classes/image.bbclass create mode 100644 meta/classes/image_types.bbclass create mode 100644 meta/classes/image_types_uboot.bbclass create mode 100644 meta/classes/insane.bbclass create mode 100644 meta/classes/insserv.bbclass create mode 100644 meta/classes/kernel-arch.bbclass create mode 100644 meta/classes/kernel-grub.bbclass create mode 100644 meta/classes/kernel-module-split.bbclass create mode 100644 meta/classes/kernel-yocto.bbclass create mode 100644 meta/classes/kernel.bbclass create mode 100644 meta/classes/lib_package.bbclass create 
mode 100644 meta/classes/libc-common.bbclass create mode 100644 meta/classes/libc-package.bbclass create mode 100644 meta/classes/license.bbclass create mode 100644 meta/classes/linux-kernel-base.bbclass create mode 100644 meta/classes/logging.bbclass create mode 100644 meta/classes/meta.bbclass create mode 100644 meta/classes/metadata_scm.bbclass create mode 100644 meta/classes/migrate_localcount.bbclass create mode 100644 meta/classes/mime.bbclass create mode 100644 meta/classes/mirrors.bbclass create mode 100644 meta/classes/module-base.bbclass create mode 100644 meta/classes/module.bbclass create mode 100644 meta/classes/multilib.bbclass create mode 100644 meta/classes/multilib_global.bbclass create mode 100644 meta/classes/multilib_header.bbclass create mode 100644 meta/classes/native.bbclass create mode 100644 meta/classes/nativesdk.bbclass create mode 100644 meta/classes/oelint.bbclass create mode 100644 meta/classes/own-mirrors.bbclass create mode 100644 meta/classes/package.bbclass create mode 100644 meta/classes/package_deb.bbclass create mode 100644 meta/classes/package_ipk.bbclass create mode 100644 meta/classes/package_rpm.bbclass create mode 100644 meta/classes/package_tar.bbclass create mode 100644 meta/classes/packagedata.bbclass create mode 100644 meta/classes/packagegroup.bbclass create mode 100644 meta/classes/packageinfo.bbclass create mode 100644 meta/classes/patch.bbclass create mode 100644 meta/classes/perlnative.bbclass create mode 100644 meta/classes/pixbufcache.bbclass create mode 100644 meta/classes/pkgconfig.bbclass create mode 100644 meta/classes/populate_sdk.bbclass create mode 100644 meta/classes/populate_sdk_base.bbclass create mode 100644 meta/classes/prexport.bbclass create mode 100644 meta/classes/primport.bbclass create mode 100644 meta/classes/prserv.bbclass create mode 100644 meta/classes/ptest-gnome.bbclass create mode 100644 meta/classes/ptest.bbclass create mode 100644 meta/classes/python-dir.bbclass create mode 100644 meta/classes/python3native.bbclass create mode 100644 meta/classes/pythonnative.bbclass create mode 100644 meta/classes/qemu.bbclass create mode 100644 meta/classes/qmake2.bbclass create mode 100644 meta/classes/qmake_base.bbclass create mode 100644 meta/classes/qt4e.bbclass create mode 100644 meta/classes/qt4x11.bbclass create mode 100644 meta/classes/recipe_sanity.bbclass create mode 100644 meta/classes/relocatable.bbclass create mode 100644 meta/classes/report-error.bbclass create mode 100644 meta/classes/rm_work.bbclass create mode 100644 meta/classes/rootfs_deb.bbclass create mode 100644 meta/classes/rootfs_ipk.bbclass create mode 100644 meta/classes/rootfs_rpm.bbclass create mode 100644 meta/classes/sanity.bbclass create mode 100644 meta/classes/scons.bbclass create mode 100644 meta/classes/sdl.bbclass create mode 100644 meta/classes/setuptools.bbclass create mode 100644 meta/classes/setuptools3.bbclass create mode 100644 meta/classes/sip.bbclass create mode 100644 meta/classes/siteconfig.bbclass create mode 100644 meta/classes/siteinfo.bbclass create mode 100644 meta/classes/spdx.bbclass create mode 100644 meta/classes/sstate.bbclass create mode 100644 meta/classes/staging.bbclass create mode 100644 meta/classes/syslinux.bbclass create mode 100644 meta/classes/systemd.bbclass create mode 100644 meta/classes/terminal.bbclass create mode 100644 meta/classes/testimage-auto.bbclass create mode 100644 meta/classes/testimage.bbclass create mode 100644 meta/classes/texinfo.bbclass create mode 100644 meta/classes/tinderclient.bbclass 
create mode 100644 meta/classes/toaster.bbclass create mode 100644 meta/classes/toolchain-scripts.bbclass create mode 100644 meta/classes/typecheck.bbclass create mode 100644 meta/classes/uboot-config.bbclass create mode 100644 meta/classes/uninative.bbclass create mode 100644 meta/classes/update-alternatives.bbclass create mode 100644 meta/classes/update-rc.d.bbclass create mode 100644 meta/classes/useradd-staticids.bbclass create mode 100644 meta/classes/useradd.bbclass create mode 100644 meta/classes/useradd_base.bbclass create mode 100644 meta/classes/utility-tasks.bbclass create mode 100644 meta/classes/utils.bbclass create mode 100644 meta/classes/vala.bbclass create mode 100644 meta/classes/waf.bbclass (limited to 'meta/classes') diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass new file mode 100644 index 0000000000..4bc99272c4 --- /dev/null +++ b/meta/classes/allarch.bbclass @@ -0,0 +1,43 @@ +# +# This class is used for architecture independent recipes/data files (usally scripts) +# + +# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will +# point elsewhere after these changes. +STAGING_DIR_HOST := "${STAGING_DIR_HOST}" + +PACKAGE_ARCH = "all" + +python () { + # Allow this class to be included but overridden - only set + # the values if we're still "all" package arch. + if d.getVar("PACKAGE_ARCH") == "all": + # No need for virtual/libc or a cross compiler + d.setVar("INHIBIT_DEFAULT_DEPS","1") + + # Set these to a common set of values, we shouldn't be using them other that for WORKDIR directory + # naming anyway + d.setVar("TARGET_ARCH", "allarch") + d.setVar("TARGET_OS", "linux") + d.setVar("TARGET_CC_ARCH", "none") + d.setVar("TARGET_LD_ARCH", "none") + d.setVar("TARGET_AS_ARCH", "none") + d.setVar("TARGET_FPU", "") + d.setVar("TARGET_PREFIX", "") + d.setVar("PACKAGE_EXTRA_ARCHS", "") + d.setVar("SDK_ARCH", "none") + d.setVar("SDK_CC_ARCH", "none") + + # Avoid this being unnecessarily different due to nuances of + # the target machine that aren't important for "all" arch + # packages. 
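As an aside, a recipe opts into this class simply by inheriting allarch once its contents really are architecture-independent. A minimal sketch of such a recipe follows; the recipe name, SRC_URI and install step are invented purely for illustration and are not part of the patch itself.

    # scripts-example_1.0.bb (hypothetical)
    SUMMARY = "Architecture-independent helper scripts"
    LICENSE = "MIT"
    SRC_URI = "file://helper.sh"

    inherit allarch

    do_install() {
        install -d ${D}${bindir}
        install -m 0755 ${WORKDIR}/helper.sh ${D}${bindir}/helper.sh
    }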
+ d.setVar("LDFLAGS", "") + + # No need to do shared library processing or debug symbol handling + d.setVar("EXCLUDE_FROM_SHLIBS", "1") + d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1") + d.setVar("INHIBIT_PACKAGE_STRIP", "1") + elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d): + bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True)) +} + diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass new file mode 100644 index 0000000000..939624ae1d --- /dev/null +++ b/meta/classes/archiver.bbclass @@ -0,0 +1,380 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# This bbclass is used for creating archive for: +# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original" +# 2) patched source: ARCHIVER_MODE[src] = "patched" (default) +# 3) configured source: ARCHIVER_MODE[src] = "configured" +# 4) The patches between do_unpack and do_patch: +# ARCHIVER_MODE[diff] = "1" +# And you can set the one that you'd like to exclude from the diff: +# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches" +# 5) The environment data, similar to 'bitbake -e recipe': +# ARCHIVER_MODE[dumpdata] = "1" +# 6) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1" +# 7) Whether output the .src.rpm package: +# ARCHIVER_MODE[srpm] = "1" +# 8) Filter the license, the recipe whose license in +# COPYLEFT_LICENSE_INCLUDE will be included, and in +# COPYLEFT_LICENSE_EXCLUDE will be excluded. +# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*' +# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary' +# 9) The recipe type that will be archived: +# COPYLEFT_RECIPE_TYPES = 'target' +# + +# Don't filter the license by default +COPYLEFT_LICENSE_INCLUDE ?= '' +COPYLEFT_LICENSE_EXCLUDE ?= '' +# Create archive for all the recipe types +COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian' +inherit copyleft_filter + +ARCHIVER_MODE[srpm] ?= "0" +ARCHIVER_MODE[src] ?= "patched" +ARCHIVER_MODE[diff] ?= "0" +ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches" +ARCHIVER_MODE[dumpdata] ?= "0" +ARCHIVER_MODE[recipe] ?= "0" + +DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources" +ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources" +ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/" +ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/" + +do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}" +do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}" +do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}" + +# This is a convenience for the shell script to use it + + +python () { + pn = d.getVar('PN', True) + + if d.getVar('COPYLEFT_LICENSE_INCLUDE', True) or \ + d.getVar('COPYLEFT_LICENSE_EXCLUDE', True): + included, reason = copyleft_should_include(d) + if not included: + bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason)) + return + else: + bb.debug(1, 'archiver: %s is included: %s' % (pn, reason)) + + ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True) + ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True) + ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True) + + if ar_src == "original": + d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn) + elif ar_src == "patched": + d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn) + elif ar_src == "configured": + # We can't use "addtask do_ar_configured after do_configure" since it + # will cause the deptask of do_populate_sysroot to run not matter what + # archives we need, 
so we add the depends here. + d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn) + d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn) + elif ar_src: + bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src) + + if ar_dumpdata == "1": + d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn) + + if ar_recipe == "1": + d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn) + + # Output the srpm package + ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True) + if ar_srpm == "1": + if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm': + d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn) + if ar_dumpdata == "1": + d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn) + if ar_recipe == "1": + d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn) + if ar_src == "original": + d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn) + elif ar_src == "patched": + d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn) + elif ar_src == "configured": + d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn) + + # The gcc staff uses shared source + flag = d.getVarFlag("do_unpack", "stamp-base", True) + if flag: + if ar_src in [ 'original', 'patched' ]: + ar_outdir = os.path.join(d.getVar('ARCHIVER_TOPDIR', True), 'work-shared') + d.setVar('ARCHIVER_OUTDIR', ar_outdir) + d.setVarFlag('do_ar_original', 'stamp-base', flag) + d.setVarFlag('do_ar_patched', 'stamp-base', flag) + d.setVarFlag('do_unpack_and_patch', 'stamp-base', flag) + d.setVarFlag('do_ar_original', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR') + d.setVarFlag('do_unpack_and_patch', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR') + d.setVarFlag('do_ar_patched', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR') + d.setVarFlag('create_diff_gz', 'vardepsexclude', 'PF') + d.setVarFlag('create_tarball', 'vardepsexclude', 'PF') + + flag_clean = d.getVarFlag('do_unpack', 'stamp-base-clean', True) + if flag_clean: + d.setVarFlag('do_ar_original', 'stamp-base-clean', flag_clean) + d.setVarFlag('do_ar_patched', 'stamp-base-clean', flag_clean) + d.setVarFlag('do_unpack_and_patch', 'stamp-base-clean', flag_clean) +} + +# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/. +# Files in SRC_URI are copied directly, anything that's a directory +# (e.g. git repositories) is "unpacked" and then put into a tarball. +python do_ar_original() { + + import shutil, tarfile, tempfile + + if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original": + return + + ar_outdir = d.getVar('ARCHIVER_OUTDIR', True) + bb.note('Archiving the original source...') + fetch = bb.fetch2.Fetch([], d) + for url in fetch.urls: + local = fetch.localpath(url).rstrip("/"); + if os.path.isfile(local): + shutil.copy(local, ar_outdir) + elif os.path.isdir(local): + basename = os.path.basename(local) + + tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True)) + fetch.unpack(tmpdir, (url,)) + + os.chdir(tmpdir) + # We split on '+' to chuck any annoying AUTOINC+ in the revision. + try: + src_rev = bb.fetch2.get_srcrev(d).split('+')[-1][:10] + except: + src_rev = 'NOREV' + tarname = os.path.join(ar_outdir, basename + '.' 
+ src_rev + '.tar.gz') + tar = tarfile.open(tarname, 'w:gz') + tar.add('.') + tar.close() + + # Emit patch series files for 'original' + bb.note('Writing patch series files...') + for patch in src_patches(d): + _, _, local, _, _, parm = bb.fetch.decodeurl(patch) + patchdir = parm.get('patchdir') + if patchdir: + series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_')) + else: + series = os.path.join(ar_outdir, 'series') + + with open(series, 'a') as s: + s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel'])) +} + +python do_ar_patched() { + + if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched': + return + + # Get the ARCHIVER_OUTDIR before we reset the WORKDIR + ar_outdir = d.getVar('ARCHIVER_OUTDIR', True) + bb.note('Archiving the patched source...') + d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True)) + # The gcc staff uses shared source + flag = d.getVarFlag('do_unpack', 'stamp-base', True) + if flag: + create_tarball(d, d.getVar('S', True), 'patched', ar_outdir, 'gcc') + else: + create_tarball(d, d.getVar('S', True), 'patched', ar_outdir) +} + +python do_ar_configured() { + import shutil + + ar_outdir = d.getVar('ARCHIVER_OUTDIR', True) + if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured': + bb.note('Archiving the configured source...') + # The libtool-native's do_configure will remove the + # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the + # do_configure, we archive the already configured ${S} to + # instead of. + if d.getVar('PN', True) != 'libtool-native': + # Change the WORKDIR to make do_configure run in another dir. + d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True)) + if bb.data.inherits_class('kernel-yocto', d): + bb.build.exec_func('do_kernel_configme', d) + if bb.data.inherits_class('cmake', d): + bb.build.exec_func('do_generate_toolchain_file', d) + prefuncs = d.getVarFlag('do_configure', 'prefuncs', True) + for func in (prefuncs or '').split(): + if func != "sysroot_cleansstate": + bb.build.exec_func(func, d) + bb.build.exec_func('do_configure', d) + postfuncs = d.getVarFlag('do_configure', 'postfuncs', True) + for func in (postfuncs or '').split(): + if func != "do_qa_configure": + bb.build.exec_func(func, d) + srcdir = d.getVar('S', True) + builddir = d.getVar('B', True) + if srcdir != builddir: + if os.path.exists(builddir): + oe.path.copytree(builddir, os.path.join(srcdir, \ + 'build.%s.ar_configured' % d.getVar('PF', True))) + create_tarball(d, srcdir, 'configured', ar_outdir) +} + +def create_tarball(d, srcdir, suffix, ar_outdir, pf=None): + """ + create the tarball from srcdir + """ + import tarfile + + bb.utils.mkdirhier(ar_outdir) + if pf: + tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % (pf, suffix)) + else: + tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \ + (d.getVar('PF', True), suffix)) + + srcdir = srcdir.rstrip('/') + dirname = os.path.dirname(srcdir) + basename = os.path.basename(srcdir) + os.chdir(dirname) + bb.note('Creating %s' % tarname) + tar = tarfile.open(tarname, 'w:gz') + tar.add(basename) + tar.close() + +# creating .diff.gz between source.orig and source +def create_diff_gz(d, src_orig, src, ar_outdir): + + import subprocess + + if not os.path.isdir(src) or not os.path.isdir(src_orig): + return + + # The diff --exclude can't exclude the file with path, so we copy + # the patched source, and remove the files that we'd like to + # exclude. 
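As an aside, pulling together the ARCHIVER_MODE flags documented at the top of this class, a build would typically enable source archiving from local.conf or a distro configuration along these lines. This is only a sketch using the flag values the class itself documents:

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "patched"
    ARCHIVER_MODE[diff] = "1"
    ARCHIVER_MODE[diff-exclude] = ".pc autom4te.cache patches"
    ARCHIVER_MODE[recipe] = "1"
    COPYLEFT_LICENSE_INCLUDE = "GPL* LGPL*"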
+ src_patched = src + '.patched' + oe.path.copyhardlinktree(src, src_patched) + for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split(): + bb.utils.remove(os.path.join(src_orig, i), recurse=True) + bb.utils.remove(os.path.join(src_patched, i), recurse=True) + + dirname = os.path.dirname(src) + basename = os.path.basename(src) + os.chdir(dirname) + out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True)) + diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file) + subprocess.call(diff_cmd, shell=True) + bb.utils.remove(src_patched, recurse=True) + +# Run do_unpack and do_patch +python do_unpack_and_patch() { + if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \ + [ 'patched', 'configured'] and \ + d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1': + return + + ar_outdir = d.getVar('ARCHIVER_OUTDIR', True) + + # Change the WORKDIR to make do_unpack do_patch run in another dir. + d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True)) + + # The changed 'WORKDIR' also casued 'B' changed, create dir 'B' for the + # possibly requiring of the following tasks (such as some recipes's + # do_patch required 'B' existed). + bb.utils.mkdirhier(d.getVar('B', True)) + + # The kernel source is ready after do_validate_branches + if bb.data.inherits_class('kernel-yocto', d): + bb.build.exec_func('do_unpack', d) + bb.build.exec_func('do_kernel_checkout', d) + bb.build.exec_func('do_validate_branches', d) + else: + bb.build.exec_func('do_unpack', d) + + # Save the original source for creating the patches + if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1': + src = d.getVar('S', True).rstrip('/') + src_orig = '%s.orig' % src + oe.path.copytree(src, src_orig) + bb.build.exec_func('do_patch', d) + # Create the patches + if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1': + bb.note('Creating diff gz...') + create_diff_gz(d, src_orig, src, ar_outdir) + bb.utils.remove(src_orig, recurse=True) +} + +python do_ar_recipe () { + """ + archive the recipe, including .bb and .inc. 
+ """ + import re + import shutil + + require_re = re.compile( r"require\s+(.+)" ) + include_re = re.compile( r"include\s+(.+)" ) + bbfile = d.getVar('FILE', True) + outdir = os.path.join(d.getVar('WORKDIR', True), \ + '%s-recipe' % d.getVar('PF', True)) + bb.utils.mkdirhier(outdir) + shutil.copy(bbfile, outdir) + + dirname = os.path.dirname(bbfile) + bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True)) + f = open(bbfile, 'r') + for line in f.readlines(): + incfile = None + if require_re.match(line): + incfile = require_re.match(line).group(1) + elif include_re.match(line): + incfile = include_re.match(line).group(1) + if incfile: + incfile = bb.data.expand(incfile, d) + incfile = bb.utils.which(bbpath, incfile) + if incfile: + shutil.copy(incfile, outdir) + + create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True)) + bb.utils.remove(outdir, recurse=True) +} + +python do_dumpdata () { + """ + dump environment data to ${PF}-showdata.dump + """ + + dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \ + '%s-showdata.dump' % d.getVar('PF', True)) + bb.note('Dumping metadata into %s' % dumpfile) + f = open(dumpfile, 'w') + # emit variables and shell functions + bb.data.emit_env(f, d, True) + # emit the metadata which isn't valid shell + for e in d.keys(): + if bb.data.getVarFlag(e, 'python', d): + f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, True))) + f.close() +} + +SSTATETASKS += "do_deploy_archives" +do_deploy_archives () { + echo "Deploying source archive files ..." +} +python do_deploy_archives_setscene () { + sstate_setscene(d) +} +do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}" +do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}" + +addtask do_ar_original after do_unpack +addtask do_unpack_and_patch after do_patch +addtask do_ar_patched after do_unpack_and_patch +addtask do_ar_configured after do_unpack_and_patch +addtask do_dumpdata +addtask do_ar_recipe +addtask do_deploy_archives before do_build + +do_unpack_and_patch[depends] += "gcc-source:do_patch" diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass new file mode 100644 index 0000000000..71cf97a391 --- /dev/null +++ b/meta/classes/autotools-brokensep.bbclass @@ -0,0 +1,5 @@ +# Autotools class for recipes where separate build dir doesn't work +# Ideally we should fix software so it does work. Standard autotools supports +# this. +inherit autotools +B = "${S}" diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass new file mode 100644 index 0000000000..b5f45160ed --- /dev/null +++ b/meta/classes/autotools.bbclass @@ -0,0 +1,302 @@ +def autotools_dep_prepend(d): + if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True): + return '' + + pn = d.getVar('PN', True) + deps = '' + + if pn in ['autoconf-native', 'automake-native', 'help2man-native']: + return deps + deps += 'autoconf-native automake-native ' + + if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"): + deps += 'libtool-native ' + if not bb.data.inherits_class('native', d) \ + and not bb.data.inherits_class('nativesdk', d) \ + and not bb.data.inherits_class('cross', d) \ + and not d.getVar('INHIBIT_DEFAULT_DEPS', True): + deps += 'libtool-cross ' + + return deps + 'gnu-config-native ' + +EXTRA_OEMAKE = "" + +DEPENDS_prepend = "${@autotools_dep_prepend(d)}" + +inherit siteinfo + +# Space separated list of shell scripts with variables defined to supply test +# results for autoconf tests we cannot run at build time. 
+export CONFIG_SITE = "${@siteinfo_get_files(d)}" + +acpaths = "default" +EXTRA_AUTORECONF = "--exclude=autopoint" + +export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}" + +# When building tools for use at build-time it's recommended for the build +# system to use these variables when cross-compiling. +# (http://sources.redhat.com/autobook/autobook/autobook_270.html) +export CPP_FOR_BUILD = "${BUILD_CPP}" +export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}" + +export CC_FOR_BUILD = "${BUILD_CC}" +export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}" + +export CXX_FOR_BUILD = "${BUILD_CXX}" +export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}" + +export LD_FOR_BUILD = "${BUILD_LD}" +export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}" + +def append_libtool_sysroot(d): + # Only supply libtool sysroot option for non-native packages + if not bb.data.inherits_class('native', d): + return '--with-libtool-sysroot=${STAGING_DIR_HOST}' + return "" + +CONFIGUREOPTS = " --build=${BUILD_SYS} \ + --host=${HOST_SYS} \ + --target=${TARGET_SYS} \ + --prefix=${prefix} \ + --exec_prefix=${exec_prefix} \ + --bindir=${bindir} \ + --sbindir=${sbindir} \ + --libexecdir=${libexecdir} \ + --datadir=${datadir} \ + --sysconfdir=${sysconfdir} \ + --sharedstatedir=${sharedstatedir} \ + --localstatedir=${localstatedir} \ + --libdir=${libdir} \ + --includedir=${includedir} \ + --oldincludedir=${oldincludedir} \ + --infodir=${infodir} \ + --mandir=${mandir} \ + --disable-silent-rules \ + ${CONFIGUREOPT_DEPTRACK} \ + ${@append_libtool_sysroot(d)}" +CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking" + + +oe_runconf () { + cfgscript="${S}/configure" + if [ -x "$cfgscript" ] ; then + bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@" + set +e + ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@" + if [ "$?" != "0" ]; then + echo "Configure failed. 
The contents of all config.log files follows to aid debugging" + find ${S} -ignore_readdir_race -name config.log -print -exec cat {} \; + bbfatal "oe_runconf failed" + fi + set -e + else + bbfatal "no configure script found at $cfgscript" + fi +} + +AUTOTOOLS_AUXDIR ?= "${S}" + +CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate" + +autotools_preconfigure() { + if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then + if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then + if [ "${S}" != "${B}" ]; then + echo "Previously configured separate build directory detected, cleaning ${B}" + rm -rf ${B} + mkdir ${B} + else + # At least remove the .la files since automake won't automatically + # regenerate them even if CFLAGS/LDFLAGS are different + cd ${S}; find ${S} -name \*.la -delete + fi + fi + fi +} + +autotools_postconfigure(){ + if [ -n "${CONFIGURESTAMPFILE}" ]; then + echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE} + fi +} + +EXTRACONFFUNCS ??= "" + +do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}" +do_configure[postfuncs] += "autotools_postconfigure" + +ACLOCALDIR = "${B}/aclocal-copy" + +python autotools_copy_aclocals () { + s = d.getVar("S", True) + if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"): + if not d.getVar("AUTOTOOLS_COPYACLOCAL"): + return + + taskdepdata = d.getVar("BB_TASKDEPDATA", False) + #bb.warn(str(taskdepdata)) + pn = d.getVar("PN", True) + aclocaldir = d.getVar("ACLOCALDIR", True) + oe.path.remove(aclocaldir) + bb.utils.mkdirhier(aclocaldir) + start = None + configuredeps = [] + + for dep in taskdepdata: + data = taskdepdata[dep] + if data[1] == "do_configure" and data[0] == pn: + start = dep + break + if start is None: + bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?") + + # We need to find configure tasks which are either from -> + # or -> but not -> unless they're direct + # dependencies. This mirrors what would get restored from sstate. 
+ done = [dep] + next = [dep] + while next: + new = [] + for dep in next: + data = taskdepdata[dep] + for datadep in data[3]: + if datadep in done: + continue + done.append(datadep) + if (not data[0].endswith("-native")) and taskdepdata[datadep][0].endswith("-native") and dep != start: + continue + new.append(datadep) + if taskdepdata[datadep][1] == "do_configure": + configuredeps.append(taskdepdata[datadep][0]) + next = new + + #configuredeps2 = [] + #for dep in taskdepdata: + # data = taskdepdata[dep] + # if data[1] == "do_configure" and data[0] != pn: + # configuredeps2.append(data[0]) + #configuredeps.sort() + #configuredeps2.sort() + #bb.warn(str(configuredeps)) + #bb.warn(str(configuredeps2)) + + cp = [] + for c in configuredeps: + if c.endswith("-native"): + manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c) + elif c.startswith("nativesdk-"): + manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c) + elif "-cross-" in c or "-crosssdk" in c: + continue + else: + manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c) + try: + f = open(manifest, "r") + for l in f: + if "/aclocal/" in l and l.strip().endswith(".m4"): + cp.append(l.strip()) + except: + bb.warn("%s not found" % manifest) + + for c in cp: + t = os.path.join(aclocaldir, os.path.basename(c)) + if not os.path.exists(t): + os.symlink(c, t) +} +autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA" + +autotools_do_configure() { + # WARNING: gross hack follows: + # An autotools built package generally needs these scripts, however only + # automake or libtoolize actually install the current versions of them. + # This is a problem in builds that do not use libtool or automake, in the case + # where we -need- the latest version of these scripts. e.g. running a build + # for a package whose autotools are old, on an x86_64 machine, which the old + # config.sub does not support. Work around this by installing them manually + # regardless. + ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do + rm -f `dirname $ac`/configure + done ) + if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then + olddir=`pwd` + cd ${S} + ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/" + if [ x"${acpaths}" = xdefault ]; then + acpaths= + for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \ + grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do + acpaths="$acpaths -I $i" + done + else + acpaths="${acpaths}" + fi + AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'` + automake --version + echo "AUTOV is $AUTOV" + if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then + ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV" + fi + # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look + # like it was auto-generated. Work around this by blowing it away + # by hand, unless the package specifically asked not to run aclocal. + if ! 
echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then + rm -f aclocal.m4 + fi + if [ -e configure.in ]; then + CONFIGURE_AC=configure.in + else + CONFIGURE_AC=configure.ac + fi + if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then + if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then + : do nothing -- we still have an old unmodified configure.ac + else + bbnote Executing glib-gettextize --force --copy + echo "no" | glib-gettextize --force --copy + fi + else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then + # We'd call gettextize here if it wasn't so broken... + cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/ + if [ -d ${S}/po/ ]; then + cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/ + if [ ! -e ${S}/po/remove-potcdate.sin ]; then + cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/ + fi + fi + for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do + for j in `find ${S} -name $i | grep -v aclocal-copy`; do + rm $j + done + done + fi + fi + mkdir -p m4 + if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then + bbnote Executing intltoolize --copy --force --automake + intltoolize --copy --force --automake + fi + bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths + ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed." + cd $olddir + fi + if [ -e ${S}/configure ]; then + oe_runconf + else + bbnote "nothing to configure" + fi +} + +autotools_do_install() { + oe_runmake 'DESTDIR=${D}' install + # Info dir listing isn't interesting at this point so remove it if it exists. 
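As an aside, a typical consumer of this class simply inherits it and passes extra configure switches through EXTRA_OECONF, which oe_runconf above appends after CONFIGUREOPTS. A minimal, hypothetical recipe fragment:

    inherit autotools
    EXTRA_OECONF = "--disable-static"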
+ if [ -e "${D}${infodir}/dir" ]; then + rm -f ${D}${infodir}/dir + fi +} + +inherit siteconfig + +EXPORT_FUNCTIONS do_configure do_install + +B = "${WORKDIR}/build" diff --git a/meta/classes/autotools_stage.bbclass b/meta/classes/autotools_stage.bbclass new file mode 100644 index 0000000000..b3c41e4b4d --- /dev/null +++ b/meta/classes/autotools_stage.bbclass @@ -0,0 +1,2 @@ +inherit autotools + diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass new file mode 100644 index 0000000000..ff8c63394f --- /dev/null +++ b/meta/classes/base.bbclass @@ -0,0 +1,566 @@ +BB_DEFAULT_TASK ?= "build" +CLASSOVERRIDE ?= "class-target" + +inherit patch +inherit staging + +inherit mirrors +inherit utils +inherit utility-tasks +inherit metadata_scm +inherit logging + +OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath" +OE_IMPORTS[type] = "list" + +def oe_import(d): + import sys + + bbpath = d.getVar("BBPATH", True).split(":") + sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath] + + def inject(name, value): + """Make a python object accessible from the metadata""" + if hasattr(bb.utils, "_context"): + bb.utils._context[name] = value + else: + __builtins__[name] = value + + import oe.data + for toimport in oe.data.typed_value("OE_IMPORTS", d): + imported = __import__(toimport) + inject(toimport.split(".", 1)[0], imported) + + return "" + +# We need the oe module name space early (before INHERITs get added) +OE_IMPORTED := "${@oe_import(d)}" + +def lsb_distro_identifier(d): + adjust = d.getVar('LSB_DISTRO_ADJUST', True) + adjust_func = None + if adjust: + try: + adjust_func = globals()[adjust] + except KeyError: + pass + return oe.lsb.distro_identifier(adjust_func) + +die() { + bbfatal "$*" +} + +oe_runmake_call() { + bbnote ${MAKE} ${EXTRA_OEMAKE} "$@" + ${MAKE} ${EXTRA_OEMAKE} "$@" +} + +oe_runmake() { + oe_runmake_call "$@" || die "oe_runmake failed" +} + + +def base_dep_prepend(d): + # + # Ideally this will check a flag so we will operate properly in + # the case where host == build == target, for now we don't work in + # that case though. + # + + deps = "" + # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not + # we need that built is the responsibility of the patch function / class, not + # the application. 
+ if not d.getVar('INHIBIT_DEFAULT_DEPS'): + if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)): + deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc " + return deps + +BASEDEPENDS = "${@base_dep_prepend(d)}" + +DEPENDS_prepend="${BASEDEPENDS} " + +FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}" +# THISDIR only works properly with imediate expansion as it has to run +# in the context of the location its used (:=) +THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}" + +def extra_path_elements(d): + path = "" + elements = (d.getVar('EXTRANATIVEPATH', True) or "").split() + for e in elements: + path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":" + return path + +PATH_prepend = "${@extra_path_elements(d)}" + +addtask fetch +do_fetch[dirs] = "${DL_DIR}" +do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}" +do_fetch[vardeps] += "SRCREV" +python base_do_fetch() { + + src_uri = (d.getVar('SRC_URI', True) or "").split() + if len(src_uri) == 0: + return + + try: + fetcher = bb.fetch2.Fetch(src_uri, d) + fetcher.download() + except bb.fetch2.BBFetchException as e: + raise bb.build.FuncFailed(e) +} + +addtask unpack after do_fetch +do_unpack[dirs] = "${WORKDIR}" +do_unpack[cleandirs] = "${S}/patches" +python base_do_unpack() { + src_uri = (d.getVar('SRC_URI', True) or "").split() + if len(src_uri) == 0: + return + + rootdir = d.getVar('WORKDIR', True) + + try: + fetcher = bb.fetch2.Fetch(src_uri, d) + fetcher.unpack(rootdir) + except bb.fetch2.BBFetchException as e: + raise bb.build.FuncFailed(e) +} + +def pkgarch_mapping(d): + # Compatibility mappings of TUNE_PKGARCH (opt in) + if d.getVar("PKGARCHCOMPAT_ARMV7A", True): + if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon": + d.setVar("TUNE_PKGARCH", "armv7a") + +def get_layers_branch_rev(d): + layers = (d.getVar("BBLAYERS", True) or "").split() + layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \ + base_get_metadata_git_branch(i, None).strip(), \ + base_get_metadata_git_revision(i, None)) \ + for i in layers] + i = len(layers_branch_rev)-1 + p1 = layers_branch_rev[i].find("=") + s1 = layers_branch_rev[i][p1:] + while i > 0: + p2 = layers_branch_rev[i-1].find("=") + s2= layers_branch_rev[i-1][p2:] + if s1 == s2: + layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2] + i -= 1 + else: + i -= 1 + p1 = layers_branch_rev[i].find("=") + s1= layers_branch_rev[i][p1:] + return layers_branch_rev + + +BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars" +BUILDCFG_FUNCS[type] = "list" + +def buildcfg_vars(d): + statusvars = oe.data.typed_value('BUILDCFG_VARS', d) + for var in statusvars: + value = d.getVar(var, True) + if value is not None: + yield '%-17s = "%s"' % (var, value) + +def buildcfg_neededvars(d): + needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d) + pesteruser = [] + for v in needed_vars: + val = d.getVar(v, True) + if not val or val == 'INVALID': + pesteruser.append(v) + + if pesteruser: + bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' 
% ', '.join(pesteruser)) + +addhandler base_eventhandler +base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise" +python base_eventhandler() { + if isinstance(e, bb.event.ConfigParsed): + e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data)) + e.data.setVar('BB_VERSION', bb.__version__) + pkgarch_mapping(e.data) + oe.utils.features_backfill("DISTRO_FEATURES", e.data) + oe.utils.features_backfill("MACHINE_FEATURES", e.data) + + if isinstance(e, bb.event.BuildStarted): + localdata = bb.data.createCopy(e.data) + bb.data.update_data(localdata) + statuslines = [] + for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata): + g = globals() + if func not in g: + bb.warn("Build configuration function '%s' does not exist" % func) + else: + flines = g[func](localdata) + if flines: + statuslines.extend(flines) + + statusheader = e.data.getVar('BUILDCFG_HEADER', True) + bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines))) + + # This code is to silence warnings where the SDK variables overwrite the + # target ones and we'd see dulpicate key names overwriting each other + # for various PREFERRED_PROVIDERS + if isinstance(e, bb.event.RecipePreFinalise): + if e.data.getVar("TARGET_PREFIX", True) == e.data.getVar("SDK_PREFIX", True): + e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils") + e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial") + e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc") + e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++") + e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs") + +} + +addtask configure after do_patch +do_configure[dirs] = "${S} ${B}" +do_configure[deptask] = "do_populate_sysroot" +base_do_configure() { + : +} + +addtask compile after do_configure +do_compile[dirs] = "${S} ${B}" +base_do_compile() { + if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then + oe_runmake || die "make failed" + else + bbnote "nothing to compile" + fi +} + +addtask install after do_compile +do_install[dirs] = "${D} ${S} ${B}" +# Remove and re-create ${D} so that is it guaranteed to be empty +do_install[cleandirs] = "${D}" + +base_do_install() { + : +} + +base_do_package() { + : +} + +addtask build after do_populate_sysroot +do_build[noexec] = "1" +do_build[recrdeptask] += "do_deploy" +do_build () { + : +} + +def set_packagetriplet(d): + archs = [] + tos = [] + tvs = [] + + archs.append(d.getVar("PACKAGE_ARCHS", True).split()) + tos.append(d.getVar("TARGET_OS", True)) + tvs.append(d.getVar("TARGET_VENDOR", True)) + + def settriplet(d, varname, archs, tos, tvs): + triplets = [] + for i in range(len(archs)): + for arch in archs[i]: + triplets.append(arch + tvs[i] + "-" + tos[i]) + triplets.reverse() + d.setVar(varname, " ".join(triplets)) + + settriplet(d, "PKGTRIPLETS", archs, tos, tvs) + + variants = d.getVar("MULTILIB_VARIANTS", True) or "" + for item in variants.split(): + localdata = bb.data.createCopy(d) + overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item + localdata.setVar("OVERRIDES", overrides) + bb.data.update_data(localdata) + + archs.append(localdata.getVar("PACKAGE_ARCHS", True).split()) + tos.append(localdata.getVar("TARGET_OS", True)) + tvs.append(localdata.getVar("TARGET_VENDOR", True)) + + settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs) + +python () { + import string, re + + # Handle PACKAGECONFIG + # + # These take the form: + # + # PACKAGECONFIG ??= "" + # PACKAGECONFIG[foo] = 
"--enable-foo,--disable-foo,foo_depends,foo_runtime_depends" + pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {} + if pkgconfigflags: + pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split() + pn = d.getVar("PN", True) + mlprefix = d.getVar("MLPREFIX", True) + + def expandFilter(appends, extension, prefix): + appends = bb.utils.explode_deps(d.expand(" ".join(appends))) + newappends = [] + for a in appends: + if a.endswith("-native") or ("-cross-" in a): + newappends.append(a) + elif a.startswith("virtual/"): + subs = a.split("/", 1)[1] + newappends.append("virtual/" + prefix + subs + extension) + else: + if a.startswith(prefix): + newappends.append(a + extension) + else: + newappends.append(prefix + a + extension) + return newappends + + def appendVar(varname, appends): + if not appends: + return + if varname.find("DEPENDS") != -1: + if pn.startswith("nativesdk-"): + appends = expandFilter(appends, "", "nativesdk-") + if pn.endswith("-native"): + appends = expandFilter(appends, "-native", "") + if mlprefix: + appends = expandFilter(appends, "", mlprefix) + varname = d.expand(varname) + d.appendVar(varname, " " + " ".join(appends)) + + extradeps = [] + extrardeps = [] + extraconf = [] + for flag, flagval in sorted(pkgconfigflags.items()): + if flag == "defaultval": + continue + items = flagval.split(",") + num = len(items) + if num > 4: + bb.error("Only enable,disable,depend,rdepend can be specified!") + + if flag in pkgconfig: + if num >= 3 and items[2]: + extradeps.append(items[2]) + if num >= 4 and items[3]: + extrardeps.append(items[3]) + if num >= 1 and items[0]: + extraconf.append(items[0]) + elif num >= 2 and items[1]: + extraconf.append(items[1]) + appendVar('DEPENDS', extradeps) + appendVar('RDEPENDS_${PN}', extrardeps) + if bb.data.inherits_class('cmake', d): + appendVar('EXTRA_OECMAKE', extraconf) + else: + appendVar('EXTRA_OECONF', extraconf) + + # If PRINC is set, try and increase the PR value by the amount specified + # The PR server is now the preferred way to handle PR changes based on + # the checksum of the recipe (including bbappend). The PRINC is now + # obsolete. Return a warning to the user. + princ = d.getVar('PRINC', True) + if princ and princ != "0": + bb.warn("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." 
% (princ, d.getVar("FILE", True))) + pr = d.getVar('PR', True) + pr_prefix = re.search("\D+",pr) + prval = re.search("\d+",pr) + if pr_prefix is None or prval is None: + bb.error("Unable to analyse format of PR variable: %s" % pr) + nval = int(prval.group(0)) + int(princ) + pr = pr_prefix.group(0) + str(nval) + pr[prval.end():] + d.setVar('PR', pr) + + pn = d.getVar('PN', True) + license = d.getVar('LICENSE', True) + if license == "INVALID": + bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn) + + if bb.data.inherits_class('license', d): + unmatched_license_flag = check_license_flags(d) + if unmatched_license_flag: + bb.debug(1, "Skipping %s because it has a restricted license not" + " whitelisted in LICENSE_FLAGS_WHITELIST" % pn) + raise bb.parse.SkipPackage("because it has a restricted license not" + " whitelisted in LICENSE_FLAGS_WHITELIST") + + # If we're building a target package we need to use fakeroot (pseudo) + # in order to capture permissions, owners, groups and special files + if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): + d.setVarFlag('do_unpack', 'umask', '022') + d.setVarFlag('do_configure', 'umask', '022') + d.setVarFlag('do_compile', 'umask', '022') + d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot') + d.setVarFlag('do_install', 'fakeroot', 1) + d.setVarFlag('do_install', 'umask', '022') + d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot') + d.setVarFlag('do_package', 'fakeroot', 1) + d.setVarFlag('do_package', 'umask', '022') + d.setVarFlag('do_package_setscene', 'fakeroot', 1) + d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot') + d.setVarFlag('do_devshell', 'fakeroot', 1) + d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot') + source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0) + if not source_mirror_fetch: + need_host = d.getVar('COMPATIBLE_HOST', True) + if need_host: + import re + this_host = d.getVar('HOST_SYS', True) + if not re.match(need_host, this_host): + raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host) + + need_machine = d.getVar('COMPATIBLE_MACHINE', True) + if need_machine: + import re + compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":") + for m in compat_machines: + if re.match(need_machine, m): + break + else: + raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True)) + + + bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split() + + check_license = False if pn.startswith("nativesdk-") else True + for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}", + "-crosssdk-${SDK_ARCH}", "-crosssdk-initial-${SDK_ARCH}", + "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]: + if pn.endswith(d.expand(t)): + check_license = False + + if check_license and bad_licenses: + bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses) + + whitelist = [] + for lic in bad_licenses: + for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]: + whitelist.extend((d.getVar(w + lic, True) or "").split()) + spdx_license = return_spdx(d, lic) + if spdx_license: + whitelist.extend((d.getVar('HOSTTOOLS_WHITELIST_%s' % spdx_license, True) or "").split()) + if not pn in whitelist: + recipe_license = d.getVar('LICENSE', True) + pkgs = d.getVar('PACKAGES', True).split() + skipped_pkgs = [] + 
unskipped_pkgs = [] + for pkg in pkgs: + if incompatible_license(d, bad_licenses, pkg): + skipped_pkgs.append(pkg) + else: + unskipped_pkgs.append(pkg) + all_skipped = skipped_pkgs and not unskipped_pkgs + if unskipped_pkgs: + for pkg in skipped_pkgs: + bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license) + d.setVar('LICENSE_EXCLUSION-' + pkg, 1) + for pkg in unskipped_pkgs: + bb.debug(1, "INCLUDING the package " + pkg) + elif all_skipped or incompatible_license(d, bad_licenses): + bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, recipe_license)) + raise bb.parse.SkipPackage("incompatible with license %s" % recipe_license) + + srcuri = d.getVar('SRC_URI', True) + # Svn packages should DEPEND on subversion-native + if "svn://" in srcuri: + d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot') + + # Git packages should DEPEND on git-native + if "git://" in srcuri: + d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot') + + # Mercurial packages should DEPEND on mercurial-native + elif "hg://" in srcuri: + d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot') + + # OSC packages should DEPEND on osc-native + elif "osc://" in srcuri: + d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot') + + # *.lz4 should depends on lz4-native for unpacking + # Not endswith because of "*.patch.lz4;patch=1". Need bb.fetch.decodeurl in future + if '.lz4' in srcuri: + d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot') + + # *.xz should depends on xz-native for unpacking + # Not endswith because of "*.patch.xz;patch=1". Need bb.fetch.decodeurl in future + if '.xz' in srcuri: + d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot') + + # unzip-native should already be staged before unpacking ZIP recipes + if ".zip" in srcuri: + d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot') + + # file is needed by rpm2cpio.sh + if ".src.rpm" in srcuri: + d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot') + + set_packagetriplet(d) + + # 'multimachine' handling + mach_arch = d.getVar('MACHINE_ARCH', True) + pkg_arch = d.getVar('PACKAGE_ARCH', True) + + if (pkg_arch == mach_arch): + # Already machine specific - nothing further to do + return + + # + # We always try to scan SRC_URI for urls with machine overrides + # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0 + # + override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True) + if override != '0': + paths = [] + fpaths = (d.getVar('FILESPATH', True) or '').split(':') + machine = d.getVar('MACHINE', True) + for p in fpaths: + if os.path.basename(p) == machine and os.path.isdir(p): + paths.append(p) + + if len(paths) != 0: + for s in srcuri.split(): + if not s.startswith("file://"): + continue + fetcher = bb.fetch2.Fetch([s], d) + local = fetcher.localpath(s) + for mp in paths: + if local.startswith(mp): + #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn)) + d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}") + return + + packages = d.getVar('PACKAGES', True).split() + for pkg in packages: + pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True) + + # We could look for != PACKAGE_ARCH here but how to choose + # if multiple differences are present? + # Look through PACKAGE_ARCHS for the priority order? 
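As an aside, referring back to the PACKAGECONFIG handling earlier in this anonymous function: a recipe normally declares its optional features in the four-field form described there (enable option, disable option, build-time DEPENDS, runtime RDEPENDS). The feature and package names below are hypothetical:

    PACKAGECONFIG ??= "ssl"
    PACKAGECONFIG[ssl] = "--enable-ssl,--disable-ssl,openssl,"
    PACKAGECONFIG[doc] = "--enable-doc,--disable-doc,libxslt-native,"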
+ if pkgarch and pkgarch == mach_arch: + d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}") + bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True)) +} + +addtask cleansstate after do_clean +python do_cleansstate() { + sstate_clean_cachefiles(d) +} + +addtask cleanall after do_cleansstate +python do_cleanall() { + src_uri = (d.getVar('SRC_URI', True) or "").split() + if len(src_uri) == 0: + return + + try: + fetcher = bb.fetch2.Fetch(src_uri, d) + fetcher.clean() + except bb.fetch2.BBFetchException, e: + raise bb.build.FuncFailed(e) +} +do_cleanall[nostamp] = "1" + + +EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass new file mode 100644 index 0000000000..a52b75be5c --- /dev/null +++ b/meta/classes/bin_package.bbclass @@ -0,0 +1,36 @@ +# +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Common variable and task for the binary package recipe. +# Basic principle: +# * The files have been unpacked to ${S} by base.bbclass +# * Skip do_configure and do_compile +# * Use do_install to install the files to ${D} +# +# Note: +# The "subdir" parameter in the SRC_URI is useful when the input package +# is rpm, ipk, deb and so on, for example: +# +# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0" +# +# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise +# they would be in ${WORKDIR}. +# + +# Skip the unwanted steps +do_configure[noexec] = "1" +do_compile[noexec] = "1" + +# Install the files to ${D} +bin_package_do_install () { + # Do it carefully + [ -d "${S}" ] || exit 1 + cd ${S} || exit 1 + tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \ + | tar --no-same-owner -xpf - -C ${D} +} + +FILES_${PN} = "/" + +EXPORT_FUNCTIONS do_install diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass new file mode 100644 index 0000000000..27f904eb42 --- /dev/null +++ b/meta/classes/binconfig-disabled.bbclass @@ -0,0 +1,15 @@ +# +# Class to disable binconfig files instead of installing them +# + +# The list of scripts which should be disabled. 
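bin_package.bbclass above installs the pre-unpacked tree into ${D} through a tar pipe, which normalises ownership and leaves quilt metadata (./patches, ./.pc) behind while preserving file modes. A loose Python analogue of that copy, assuming Python 3.8+ for dirs_exist_ok and accepting that ignore_patterns() matches the names anywhere in the tree rather than only at the top level:

import shutil

def copy_unpacked_tree(src, dest):
    # Copy the tree, skipping quilt metadata; copytree() uses copy2(),
    # so file modes and timestamps are preserved (ownership is not,
    # much like tar --no-same-owner).
    shutil.copytree(src, dest,
                    ignore=shutil.ignore_patterns("patches", ".pc"),
                    dirs_exist_ok=True)

# copy_unpacked_tree("/path/to/unpacked/foo-1.0", "/path/to/image")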
+BINCONFIG ?= "" + +FILES_${PN}-dev += "${bindir}/*-config" + +do_install_append () { + for x in ${BINCONFIG}; do + echo "#!/bin/sh" > ${D}$x + echo "exit 1" >> ${D}$x + done +} diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass new file mode 100644 index 0000000000..cbc4173601 --- /dev/null +++ b/meta/classes/binconfig.bbclass @@ -0,0 +1,63 @@ +FILES_${PN}-dev += "${bindir}/*-config" + +# The namespaces can clash here hence the two step replace +def get_binconfig_mangle(d): + s = "-e ''" + if not bb.data.inherits_class('native', d): + optional_quote = r"\(\"\?\)" + s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote + s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote + s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote + s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote + s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote + s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote + s += " -e 's:-L${libdir}:-LOELIBDIR:;'" + s += " -e 's:-I${includedir}:-IOEINCDIR:;'" + s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'" + s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'" + s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'" + s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'" + s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'" + s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'" + s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'" + s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'" + if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d): + s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d) + + return s + +BINCONFIG_GLOB ?= "*-config" + +PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess" + +binconfig_package_preprocess () { + for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do + sed -i \ + -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \ + -e 's:${STAGING_LIBDIR}:${libdir}:g;' \ + -e 's:${STAGING_INCDIR}:${includedir}:g;' \ + -e 's:${STAGING_DATADIR}:${datadir}:' \ + -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \ + $config + done + for lafile in `find ${PKGD} -name "*.la"` ; do + sed -i \ + -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \ + -e 's:${STAGING_LIBDIR}:${libdir}:g;' \ + -e 's:${STAGING_INCDIR}:${includedir}:g;' \ + -e 's:${STAGING_DATADIR}:${datadir}:' \ + -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \ + $lafile + done +} + +SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess" + +binconfig_sysroot_preprocess () { + for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do + configname=`basename $config` + install -d ${SYSROOT_DESTDIR}${bindir_crossscripts} + sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname + chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname + done +} diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass new file mode 100644 index 0000000000..a0141a82c0 --- /dev/null +++ b/meta/classes/blacklist.bbclass @@ -0,0 +1,45 @@ +# anonymous support class from originally from angstrom +# +# To use the blacklist, a distribution should include this +# class in the INHERIT_DISTRO +# +# No longer use ANGSTROM_BLACKLIST, instead use a table of +# recipes in PNBLACKLIST +# +# Features: +# +# * To add a package to the blacklist, set: +# PNBLACKLIST[pn] = "message" +# + +# Cope with PNBLACKLIST flags for multilib case +addhandler blacklist_multilib_eventhandler +blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed" +python 
blacklist_multilib_eventhandler() { + multilibs = e.data.getVar('MULTILIBS', True) + if not multilibs: + return + + # this block has been copied from base.bbclass so keep it in sync + prefixes = [] + for ext in multilibs.split(): + eext = ext.split(':') + if len(eext) > 1 and eext[0] == 'multilib': + prefixes.append(eext[1]) + + blacklists = e.data.getVarFlags('PNBLACKLIST') or {} + for pkg, reason in blacklists.items(): + if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg: + continue + for p in prefixes: + newpkg = p + "-" + pkg + if not e.data.getVarFlag('PNBLACKLIST', newpkg, True): + e.data.setVarFlag('PNBLACKLIST', newpkg, reason) +} + +python () { + blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True) + + if blacklist: + raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist)) +} diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass new file mode 100644 index 0000000000..09da032049 --- /dev/null +++ b/meta/classes/boot-directdisk.bbclass @@ -0,0 +1,191 @@ +# boot-directdisk.bbclass +# (loosly based off bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.) +# +# Create an image which can be placed directly onto a harddisk using dd and then +# booted. +# +# This uses syslinux. extlinux would have been nice but required the ext2/3 +# partition to be mounted. grub requires to run itself as part of the install +# process. +# +# The end result is a 512 boot sector populated with an MBR and partition table +# followed by an msdos fat16 partition containing syslinux and a linux kernel +# completed by the ext2/3 rootfs. +# +# We have to push the msdos parition table size > 16MB so fat 16 is used as parted +# won't touch fat12 partitions. + +# External variables needed + +# ${ROOTFS} - the rootfs image to incorporate + +do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \ + syslinux:do_populate_sysroot \ + syslinux-native:do_populate_sysroot \ + parted-native:do_populate_sysroot \ + mtools-native:do_populate_sysroot " + +PACKAGES = " " +EXCLUDE_FROM_WORLD = "1" + +BOOTDD_VOLUME_ID ?= "boot" +BOOTDD_EXTRA_SPACE ?= "16384" + +EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}" +EFI_PROVIDER ?= "grub-efi" +EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}" + +# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not +# contain "efi". This way legacy is supported by default if neither is +# specified, maintaining the original behavior. +def pcbios(d): + pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d) + if pcbios == "0": + pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d) + return pcbios + +def pcbios_class(d): + if d.getVar("PCBIOS", True) == "1": + return "syslinux" + return "" + +PCBIOS = "${@pcbios(d)}" +PCBIOS_CLASS = "${@pcbios_class(d)}" + +inherit ${PCBIOS_CLASS} +inherit ${EFI_CLASS} + +# Get the build_syslinux_cfg() function from the syslinux class + +AUTO_SYSLINUXCFG = "1" +DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}" +SYSLINUX_ROOT ?= "root=/dev/sda2" +SYSLINUX_TIMEOUT ?= "10" + +IS_VMDK = '${@bb.utils.contains("IMAGE_FSTYPES", "vmdk", "true", "false", d)}' + +boot_direct_populate() { + dest=$1 + install -d $dest + + # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use. 
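The PCBIOS/EFI selection in boot-directdisk.bbclass above reduces to: legacy BIOS support is enabled when MACHINE_FEATURES contains "pcbios", or when it does not contain "efi" at all, so a machine that names neither feature still gets the historical legacy-boot default. A standalone sketch of that decision table:

def boot_support(machine_features):
    features = machine_features.split()
    efi = "efi" in features
    # Legacy BIOS if explicitly requested, or if EFI is not requested at all.
    pcbios = "pcbios" in features or not efi
    return {"pcbios": pcbios, "efi": efi}

for mf in ("", "efi", "pcbios", "efi pcbios"):
    print(repr(mf), boot_support(mf))
# ''           -> pcbios only
# 'efi'        -> efi only
# 'pcbios'     -> pcbios only
# 'efi pcbios' -> both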
+ install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz + + # initrd is made of concatenation of multiple filesystem images + if [ -n "${INITRD}" ]; then + rm -f $dest/initrd + for fs in ${INITRD} + do + if [ -s "${fs}" ]; then + cat ${fs} >> $dest/initrd + else + bbfatal "${fs} is invalid. initrd image creation failed." + fi + done + chmod 0644 $dest/initrd + fi +} + +build_boot_dd() { + HDDDIR="${S}/hdd/boot" + HDDIMG="${S}/hdd.image" + IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect + + boot_direct_populate $HDDDIR + + if [ "${PCBIOS}" = "1" ]; then + syslinux_hddimg_populate $HDDDIR + fi + if [ "${EFI}" = "1" ]; then + efi_hddimg_populate $HDDDIR + fi + + if [ "${IS_VMDK}" = "true" ]; then + if [ "x${AUTO_SYSLINUXMENU}" = "x1" ] ; then + install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 $HDDDIR/${SYSLINUXDIR}/ + if [ "x${SYSLINUX_SPLASH}" != "x" ] ; then + install -m 0644 ${SYSLINUX_SPLASH} $HDDDIR/${SYSLINUXDIR}/splash.lss + fi + fi + fi + + BLOCKS=`du -bks $HDDDIR | cut -f 1` + BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}` + + # Ensure total sectors is an integral number of sectors per + # track or mcopy will complain. Sectors are 512 bytes, and we + # generate images with 32 sectors per track. This calculation is + # done in blocks, thus the mod by 16 instead of 32. + BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16))) + + mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS + mcopy -i $HDDIMG -s $HDDDIR/* ::/ + + if [ "${PCBIOS}" = "1" ]; then + syslinux_hdddirect_install $HDDIMG + fi + chmod 644 $HDDIMG + + ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1` + TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS` + END1=`expr $BLOCKS \* 1024` + END2=`expr $END1 + 512` + END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1` + + echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3 + rm -rf $IMAGE + dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1 + + parted $IMAGE mklabel msdos + parted $IMAGE mkpart primary fat16 0 ${END1}B + parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B + parted $IMAGE set 1 boot on + + parted $IMAGE print + + awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \ + dd of=$IMAGE bs=1 seek=440 conv=notrunc + + OFFSET=`expr $END2 / 512` + if [ "${PCBIOS}" = "1" ]; then + dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc + fi + + dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512 + dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512 + + cd ${DEPLOY_DIR_IMAGE} + rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect + ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect +} + +python do_bootdirectdisk() { + validate_disk_signature(d) + if d.getVar("PCBIOS", True) == "1": + bb.build.exec_func('build_syslinux_cfg', d) + if d.getVar("EFI", True) == "1": + bb.build.exec_func('build_efi_cfg', d) + bb.build.exec_func('build_boot_dd', d) +} + +def generate_disk_signature(): + import uuid + + signature = str(uuid.uuid4())[:8] + + if signature != '00000000': + return signature + else: + return 'ffffffff' + +def validate_disk_signature(d): + import re + + disk_signature = d.getVar("DISK_SIGNATURE", True) + + if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature): + bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature) + +DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}" + +addtask bootdirectdisk before do_build diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass new file mode 100644 
index 0000000000..859d517dbd --- /dev/null +++ b/meta/classes/bootimg.bbclass @@ -0,0 +1,267 @@ +# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved +# Released under the MIT license (see packages/COPYING) + +# Creates a bootable image using syslinux, your kernel and an optional +# initrd + +# +# End result is two things: +# +# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel, +# an initrd and a rootfs image. These can be written to harddisks directly and +# also booted on USB flash disks (write them there with dd). +# +# 2. A CD .iso image + +# Boot process is that the initrd will boot and process which label was selected +# in syslinux. Actions based on the label are then performed (e.g. installing to +# an hdd) + +# External variables (also used by syslinux.bbclass) +# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional) +# ${COMPRESSISO} - Transparent compress ISO, reduce size ~40% if set to 1 +# ${NOISO} - skip building the ISO image if set to 1 +# ${NOHDD} - skip building the HDD image if set to 1 +# ${HDDIMG_ID} - FAT image volume-id +# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) + +do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \ + mtools-native:do_populate_sysroot \ + cdrtools-native:do_populate_sysroot \ + ${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}" + +PACKAGES = " " +EXCLUDE_FROM_WORLD = "1" + +HDDDIR = "${S}/hddimg" +ISODIR = "${S}/iso" +EFIIMGDIR = "${S}/efi_img" +COMPACT_ISODIR = "${S}/iso.z" +COMPRESSISO ?= "0" + +BOOTIMG_VOLUME_ID ?= "boot" +BOOTIMG_EXTRA_SPACE ?= "512" + +EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}" +EFI_PROVIDER ?= "grub-efi" +EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}" + +# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not +# contain "efi". This way legacy is supported by default if neither is +# specified, maintaining the original behavior. +def pcbios(d): + pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d) + if pcbios == "0": + pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d) + return pcbios + +PCBIOS = "${@pcbios(d)}" + +# The syslinux is required for the isohybrid command and boot catalog +inherit syslinux +inherit ${EFI_CLASS} + +populate() { + DEST=$1 + install -d ${DEST} + + # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use. + install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz + + # initrd is made of concatenation of multiple filesystem images + if [ -n "${INITRD}" ]; then + rm -f ${DEST}/initrd + for fs in ${INITRD} + do + if [ -s "${fs}" ]; then + cat ${fs} >> ${DEST}/initrd + else + bbfatal "${fs} is invalid. initrd image creation failed." + fi + done + chmod 0644 ${DEST}/initrd + fi + + if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then + install -m 0644 ${ROOTFS} ${DEST}/rootfs.img + fi + +} + +build_iso() { + # Only create an ISO if we have an INITRD and NOISO was not set + if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then + bbnote "ISO image will not be created." + return + fi + # ${INITRD} is a list of multiple filesystem images + for fs in ${INITRD} + do + if [ ! -s "${fs}" ]; then + bbnote "ISO image will not be created. ${fs} is invalid." 
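populate() above builds the initrd by simply concatenating every image listed in INITRD (the kernel accepts concatenated cpio archives) and aborts if any segment is empty or missing. A minimal sketch of that step; the file names in the example are hypothetical:

import os

def build_initrd(segments, dest):
    # Concatenate every initrd segment into one file, refusing empty or
    # missing inputs just like the [ -s "${fs}" ] test above.
    with open(dest, "wb") as out:
        for seg in segments:
            if not os.path.exists(seg) or os.path.getsize(seg) == 0:
                raise RuntimeError("%s is invalid. initrd image creation failed." % seg)
            with open(seg, "rb") as f:
                out.write(f.read())

# build_initrd(["base-initramfs.cpio.gz", "extra-modules.cpio.gz"], "initrd")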
+ return + fi + done + + + populate ${ISODIR} + + if [ "${PCBIOS}" = "1" ]; then + syslinux_iso_populate ${ISODIR} + fi + if [ "${EFI}" = "1" ]; then + efi_iso_populate ${ISODIR} + build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img + fi + + # EFI only + if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then + # Work around bug in isohybrid where it requires isolinux.bin + # In the boot catalog, even though it is not used + mkdir -p ${ISODIR}/${ISOLINUXDIR} + install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR} + fi + + if [ "${COMPRESSISO}" = "1" ] ; then + # create compact directory, compress iso + mkdir -p ${COMPACT_ISODIR} + mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img + + # move compact iso to iso, then remove compact directory + mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img + rm -Rf ${COMPACT_ISODIR} + mkisofs_compress_opts="-R -z -D -l" + else + mkisofs_compress_opts="-r" + fi + + if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then + # PCBIOS only media + mkisofs -V ${BOOTIMG_VOLUME_ID} \ + -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \ + -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \ + $mkisofs_compress_opts \ + ${MKISOFS_OPTIONS} ${ISODIR} + else + # EFI only OR EFI+PCBIOS + mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \ + -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \ + -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \ + $mkisofs_compress_opts ${MKISOFS_OPTIONS} \ + -eltorito-alt-boot -eltorito-platform efi \ + -b efi.img -no-emul-boot \ + ${ISODIR} + isohybrid_args="-u" + fi + + isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso + + cd ${DEPLOY_DIR_IMAGE} + rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso + ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso +} + +build_fat_img() { + FATSOURCEDIR=$1 + FATIMG=$2 + + # Calculate the size required for the final image including the + # data and filesystem overhead. + # Sectors: 512 bytes + # Blocks: 1024 bytes + + # Determine the sector count just for the data + SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2) + + # Account for the filesystem overhead. This includes directory + # entries in the clusters as well as the FAT itself. + # Assumptions: + # FAT32 (12 or 16 may be selected by mkdosfs, but the extra + # padding will be minimal on those smaller images and not + # worth the logic here to caclulate the smaller FAT sizes) + # < 16 entries per directory + # 8.3 filenames only + + # 32 bytes per dir entry + DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32) + # 32 bytes for every end-of-directory dir entry + DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32)) + # 4 bytes per FAT entry per sector of data + FAT_BYTES=$(expr $SECTORS \* 4) + # 4 bytes per FAT entry per end-of-cluster list + FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4)) + + # Use a ceiling function to determine FS overhead in sectors + DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512) + # There are two FATs on the image + FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2) + SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS)) + + # Determine the final size in blocks accounting for some padding + BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE}) + + # Ensure total sectors is an integral number of sectors per + # track or mcopy will complain. Sectors are 512 bytes, and we + # generate images with 32 sectors per track. 
This calculation is + # done in blocks, thus the mod by 16 instead of 32. + BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16))) + + # mkdosfs will sometimes use FAT16 when it is not appropriate, + # resulting in a boot failure from SYSLINUX. Use FAT32 for + # images larger than 512MB, otherwise let mkdosfs decide. + if [ $(expr $BLOCKS / 1024) -gt 512 ]; then + FATSIZE="-F 32" + fi + + if [ -z "${HDDIMG_ID}" ]; then + mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \ + ${BLOCKS} + else + mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \ + ${BLOCKS} -i ${HDDIMG_ID} + fi + + # Copy FATSOURCEDIR recursively into the image file directly + mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/ +} + +build_hddimg() { + # Create an HDD image + if [ "${NOHDD}" != "1" ] ; then + populate ${HDDDIR} + + if [ "${PCBIOS}" = "1" ]; then + syslinux_hddimg_populate ${HDDDIR} + fi + if [ "${EFI}" = "1" ]; then + efi_hddimg_populate ${HDDDIR} + fi + + build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg + + if [ "${PCBIOS}" = "1" ]; then + syslinux_hddimg_install + fi + + chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg + + cd ${DEPLOY_DIR_IMAGE} + rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg + ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg + fi +} + +python do_bootimg() { + if d.getVar("PCBIOS", True) == "1": + bb.build.exec_func('build_syslinux_cfg', d) + if d.getVar("EFI", True) == "1": + bb.build.exec_func('build_efi_cfg', d) + bb.build.exec_func('build_hddimg', d) + bb.build.exec_func('build_iso', d) +} + +IMAGE_TYPEDEP_iso = "ext3" +IMAGE_TYPEDEP_hddimg = "ext3" +IMAGE_TYPES_MASKED += "iso hddimg" + +addtask bootimg before do_build diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass new file mode 100644 index 0000000000..3fc8956428 --- /dev/null +++ b/meta/classes/bugzilla.bbclass @@ -0,0 +1,187 @@ +# +# Small event handler to automatically open URLs and file +# bug reports at a bugzilla of your choiche +# it uses XML-RPC interface, so you must have it enabled +# +# Before using you must define BUGZILLA_USER, BUGZILLA_PASS credentials, +# BUGZILLA_XMLRPC - uri of xmlrpc.cgi, +# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in BTS for build bugs +# BUGZILLA_VERSION - version against which to report new bugs +# + +def bugzilla_find_bug_report(debug_file, server, args, bugname): + args['summary'] = bugname + bugs = server.Bug.search(args) + if len(bugs['bugs']) == 0: + print >> debug_file, "Bugs not found" + return (False,None) + else: # silently pick the first result + print >> debug_file, "Result of bug search is " + print >> debug_file, bugs + status = bugs['bugs'][0]['status'] + id = bugs['bugs'][0]['id'] + return (not status in ["CLOSED", "RESOLVED", "VERIFIED"],id) + +def bugzilla_file_bug(debug_file, server, args, name, text, version): + args['summary'] = name + args['comment'] = text + args['version'] = version + args['op_sys'] = 'Linux' + args['platform'] = 'Other' + args['severity'] = 'normal' + args['priority'] = 'Normal' + try: + return server.Bug.create(args)['id'] + except Exception, e: + print >> debug_file, repr(e) + return None + +def bugzilla_reopen_bug(debug_file, server, args, bug_number): + args['ids'] = [bug_number] + args['status'] = "CONFIRMED" + try: + server.Bug.update(args) + return True + except Exception, e: + print >> debug_file, repr(e) + return False + +def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription): + args['ids'] = 
[bug_number] + args['file_name'] = file_name + args['summary'] = logdescription + args['content_type'] = "text/plain" + args['data'] = log + args['comment'] = text + try: + server.Bug.add_attachment(args) + return True + except Exception, e: + print >> debug_file, repr(e) + return False + +def bugzilla_add_comment(debug_file, server, args, bug_number, text): + args['id'] = bug_number + args['comment'] = text + try: + server.Bug.add_comment(args) + return True + except Exception, e: + print >> debug_file, repr(e) + return False + +addhandler bugzilla_eventhandler +bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed" +python bugzilla_eventhandler() { + import glob + import xmlrpclib, httplib + + class ProxiedTransport(xmlrpclib.Transport): + def __init__(self, proxy, use_datetime = 0): + xmlrpclib.Transport.__init__(self, use_datetime) + self.proxy = proxy + self.user = None + self.password = None + + def set_user(self, user): + self.user = user + + def set_password(self, password): + self.password = password + + def make_connection(self, host): + self.realhost = host + return httplib.HTTP(self.proxy) + + def send_request(self, connection, handler, request_body): + connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler)) + if self.user != None: + if self.password != None: + auth = "%s:%s" % (self.user, self.password) + else: + auth = self.user + connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth)) + + event = e + data = e.data + name = bb.event.getName(event) + if name == "MsgNote": + # avoid recursion + return + + if name == "TaskFailed": + xmlrpc = data.getVar("BUGZILLA_XMLRPC", True) + user = data.getVar("BUGZILLA_USER", True) + passw = data.getVar("BUGZILLA_PASS", True) + product = data.getVar("BUGZILLA_PRODUCT", True) + compon = data.getVar("BUGZILLA_COMPONENT", True) + version = data.getVar("BUGZILLA_VERSION", True) + + proxy = data.getVar('http_proxy', True ) + if (proxy): + import urllib2 + s, u, p, hostport = urllib2._parse_proxy(proxy) + transport = ProxiedTransport(hostport) + else: + transport = None + + server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0) + args = { + 'Bugzilla_login': user, + 'Bugzilla_password': passw, + 'product': product, + 'component': compon} + + # evil hack to figure out what is going on + debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a") + + file = None + bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True), + "pv" : data.getVar("PV", True), + } + log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task)) + text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) ) + if len(log_file) != 0: + print >> debug_file, "Adding log file %s" % log_file[0] + file = open(log_file[0], 'r') + log = file.read() + file.close(); + else: + print >> debug_file, "No log file found for the glob" + log = None + + (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname) + print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number) + + # The bug is present and still open, attach an error log + if not bug_number: + bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version) + if not bug_number: + print >> debug_file, "Couldn't acquire a new bug_numer, filing a bugreport failed" + else: + print >> debug_file, "The new bug_number: '%s'" 
% bug_number + elif not bug_open: + if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number): + print >> debug_file, "Failed to reopen the bug #%s" % bug_number + else: + print >> debug_file, "Reopened the bug #%s" % bug_number + + if bug_number and log: + print >> debug_file, "The bug is known as '%s'" % bug_number + desc = "Build log for machine %s" % (data.getVar('MACHINE', True)) + if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc): + print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number + else: + print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number) + else: + print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number + if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text, ): + print >> debug_file, "Failed to create a comment the build log for bug #%s" % bug_number + else: + print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number) + + # store bug number for oestats-client + if bug_number: + data.setVar('OESTATS_BUG_NUMBER', bug_number) +} + diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass new file mode 100644 index 0000000000..8b5d5c214c --- /dev/null +++ b/meta/classes/buildhistory.bbclass @@ -0,0 +1,696 @@ +# +# Records history of build output in order to detect regressions +# +# Based in part on testlab.bbclass and packagehistory.bbclass +# +# Copyright (C) 2011-2014 Intel Corporation +# Copyright (C) 2007-2011 Koen Kooi +# + +BUILDHISTORY_FEATURES ?= "image package sdk" +BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory" +BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}" +BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}" +BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}/${IMAGE_BASENAME}" +BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group" +BUILDHISTORY_COMMIT ?= "0" +BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory " +BUILDHISTORY_PUSH_REPO ?= "" + +SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory" +# We want to avoid influence the signatures of sstate tasks - first the function itself: +sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory" +# then the value added to SSTATEPOSTINSTFUNCS: +SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory" + +# +# Write out metadata about this package for comparision when writing future packages +# +python buildhistory_emit_pkghistory() { + if not d.getVar('BB_CURRENTTASK', True) in ['packagedata', 'packagedata_setscene']: + return 0 + + if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split(): + return 0 + + import re + import json + import errno + + pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True) + + class RecipeInfo: + def __init__(self, name): + self.name = name + self.pe = "0" + self.pv = "0" + self.pr = "r0" + self.depends = "" + self.packages = "" + self.srcrev = "" + + + class PackageInfo: + def __init__(self, name): + self.name = name + self.pe = "0" + self.pv = "0" + self.pr = "r0" + # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them + self.pkg = "" + self.pkge = "" + self.pkgv = "" + self.pkgr = "" + self.size = 0 + self.depends = "" + self.rprovides = "" + self.rdepends = "" + self.rrecommends = "" + self.rsuggests = "" + self.rreplaces = "" + self.rconflicts = "" + self.files = "" + 
self.filelist = "" + # Variables that need to be written to their own separate file + self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']) + + # Should check PACKAGES here to see if anything removed + + def readPackageInfo(pkg, histfile): + pkginfo = PackageInfo(pkg) + with open(histfile, "r") as f: + for line in f: + lns = line.split('=') + name = lns[0].strip() + value = lns[1].strip(" \t\r\n").strip('"') + if name == "PE": + pkginfo.pe = value + elif name == "PV": + pkginfo.pv = value + elif name == "PR": + pkginfo.pr = value + elif name == "PKG": + pkginfo.pkg = value + elif name == "PKGE": + pkginfo.pkge = value + elif name == "PKGV": + pkginfo.pkgv = value + elif name == "PKGR": + pkginfo.pkgr = value + elif name == "RPROVIDES": + pkginfo.rprovides = value + elif name == "RDEPENDS": + pkginfo.rdepends = value + elif name == "RRECOMMENDS": + pkginfo.rrecommends = value + elif name == "RSUGGESTS": + pkginfo.rsuggests = value + elif name == "RREPLACES": + pkginfo.rreplaces = value + elif name == "RCONFLICTS": + pkginfo.rconflicts = value + elif name == "PKGSIZE": + pkginfo.size = long(value) + elif name == "FILES": + pkginfo.files = value + elif name == "FILELIST": + pkginfo.filelist = value + # Apply defaults + if not pkginfo.pkg: + pkginfo.pkg = pkginfo.name + if not pkginfo.pkge: + pkginfo.pkge = pkginfo.pe + if not pkginfo.pkgv: + pkginfo.pkgv = pkginfo.pv + if not pkginfo.pkgr: + pkginfo.pkgr = pkginfo.pr + return pkginfo + + def getlastpkgversion(pkg): + try: + histfile = os.path.join(pkghistdir, pkg, "latest") + return readPackageInfo(pkg, histfile) + except EnvironmentError: + return None + + def sortpkglist(string): + pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0) + pkglist = [p.group(0) for p in pkgiter] + pkglist.sort() + return ' '.join(pkglist) + + def sortlist(string): + items = string.split(' ') + items.sort() + return ' '.join(items) + + pn = d.getVar('PN', True) + pe = d.getVar('PE', True) or "0" + pv = d.getVar('PV', True) + pr = d.getVar('PR', True) + + pkgdata_dir = d.getVar('PKGDATA_DIR', True) + packages = "" + try: + with open(os.path.join(pkgdata_dir, pn)) as f: + for line in f.readlines(): + if line.startswith('PACKAGES: '): + packages = squashspaces(line.split(': ', 1)[1]) + break + except IOError as e: + if e.errno == errno.ENOENT: + # Probably a -cross recipe, just ignore + return 0 + else: + raise + + packagelist = packages.split() + if not os.path.exists(pkghistdir): + bb.utils.mkdirhier(pkghistdir) + else: + # Remove files for packages that no longer exist + for item in os.listdir(pkghistdir): + if item != "latest" and item != "latest_srcrev": + if item not in packagelist: + subdir = os.path.join(pkghistdir, item) + for subfile in os.listdir(subdir): + os.unlink(os.path.join(subdir, subfile)) + os.rmdir(subdir) + + rcpinfo = RecipeInfo(pn) + rcpinfo.pe = pe + rcpinfo.pv = pv + rcpinfo.pr = pr + rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or "")) + rcpinfo.packages = packages + write_recipehistory(rcpinfo, d) + + pkgdest = d.getVar('PKGDEST', True) + for pkg in packagelist: + pkgdata = {} + with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f: + for line in f.readlines(): + item = line.rstrip('\n').split(': ', 1) + key = item[0] + if key.endswith('_' + pkg): + key = key[:-len(pkg)-1] + pkgdata[key] = item[1].decode('utf-8').decode('string_escape') + + pkge = pkgdata.get('PKGE', '0') + pkgv = pkgdata['PKGV'] + pkgr = pkgdata['PKGR'] + # + # Find out what the last 
version was + # Make sure the version did not decrease + # + lastversion = getlastpkgversion(pkg) + if lastversion: + last_pkge = lastversion.pkge + last_pkgv = lastversion.pkgv + last_pkgr = lastversion.pkgr + r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr)) + if r < 0: + msg = "Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr) + package_qa_handle_error("version-going-backwards", msg, d) + + pkginfo = PackageInfo(pkg) + # Apparently the version can be different on a per-package basis (see Python) + pkginfo.pe = pkgdata.get('PE', '0') + pkginfo.pv = pkgdata['PV'] + pkginfo.pr = pkgdata['PR'] + pkginfo.pkg = pkgdata['PKG'] + pkginfo.pkge = pkge + pkginfo.pkgv = pkgv + pkginfo.pkgr = pkgr + pkginfo.rprovides = sortpkglist(squashspaces(pkgdata.get('RPROVIDES', ""))) + pkginfo.rdepends = sortpkglist(squashspaces(pkgdata.get('RDEPENDS', ""))) + pkginfo.rrecommends = sortpkglist(squashspaces(pkgdata.get('RRECOMMENDS', ""))) + pkginfo.rsuggests = sortpkglist(squashspaces(pkgdata.get('RSUGGESTS', ""))) + pkginfo.rreplaces = sortpkglist(squashspaces(pkgdata.get('RREPLACES', ""))) + pkginfo.rconflicts = sortpkglist(squashspaces(pkgdata.get('RCONFLICTS', ""))) + pkginfo.files = squashspaces(pkgdata.get('FILES', "")) + for filevar in pkginfo.filevars: + pkginfo.filevars[filevar] = pkgdata.get(filevar, "") + + # Gather information about packaged files + val = pkgdata.get('FILES_INFO', '') + dictval = json.loads(val) + filelist = dictval.keys() + filelist.sort() + pkginfo.filelist = " ".join(filelist) + + pkginfo.size = int(pkgdata['PKGSIZE']) + + write_pkghistory(pkginfo, d) +} + + +def write_recipehistory(rcpinfo, d): + import codecs + + bb.debug(2, "Writing recipe history") + + pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True) + + infofile = os.path.join(pkghistdir, "latest") + with codecs.open(infofile, "w", encoding='utf8') as f: + if rcpinfo.pe != "0": + f.write(u"PE = %s\n" % rcpinfo.pe) + f.write(u"PV = %s\n" % rcpinfo.pv) + f.write(u"PR = %s\n" % rcpinfo.pr) + f.write(u"DEPENDS = %s\n" % rcpinfo.depends) + f.write(u"PACKAGES = %s\n" % rcpinfo.packages) + + +def write_pkghistory(pkginfo, d): + import codecs + + bb.debug(2, "Writing package history for package %s" % pkginfo.name) + + pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True) + + pkgpath = os.path.join(pkghistdir, pkginfo.name) + if not os.path.exists(pkgpath): + bb.utils.mkdirhier(pkgpath) + + infofile = os.path.join(pkgpath, "latest") + with codecs.open(infofile, "w", encoding='utf8') as f: + if pkginfo.pe != "0": + f.write(u"PE = %s\n" % pkginfo.pe) + f.write(u"PV = %s\n" % pkginfo.pv) + f.write(u"PR = %s\n" % pkginfo.pr) + + pkgvars = {} + pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else '' + pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else '' + pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else '' + pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else '' + for pkgvar in pkgvars: + val = pkgvars[pkgvar] + if val: + f.write(u"%s = %s\n" % (pkgvar, val)) + + f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides) + f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends) + f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends) + if pkginfo.rsuggests: + f.write(u"RSUGGESTS = %s\n" % pkginfo.rsuggests) + if pkginfo.rreplaces: + f.write(u"RREPLACES = %s\n" % pkginfo.rreplaces) + if pkginfo.rconflicts: + f.write(u"RCONFLICTS = %s\n" % 
pkginfo.rconflicts) + f.write(u"PKGSIZE = %d\n" % pkginfo.size) + f.write(u"FILES = %s\n" % pkginfo.files) + f.write(u"FILELIST = %s\n" % pkginfo.filelist) + + for filevar in pkginfo.filevars: + filevarpath = os.path.join(pkgpath, "latest.%s" % filevar) + val = pkginfo.filevars[filevar] + if val: + with codecs.open(filevarpath, "w", encoding='utf8') as f: + f.write(val) + else: + if os.path.exists(filevarpath): + os.unlink(filevarpath) + +# +# rootfs_type can be: image, sdk_target, sdk_host +# +def buildhistory_list_installed(d, rootfs_type="image"): + from oe.rootfs import image_list_installed_packages + from oe.sdk import sdk_list_installed_packages + + process_list = [('file', 'bh_installed_pkgs.txt'),\ + ('deps', 'bh_installed_pkgs_deps.txt')] + + for output_type, output_file in process_list: + output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file) + + with open(output_file_full, 'w') as output: + if rootfs_type == "image": + output.write(image_list_installed_packages(d, output_type)) + else: + output.write(sdk_list_installed_packages(d, rootfs_type == "sdk_target", output_type)) + +python buildhistory_list_installed_image() { + buildhistory_list_installed(d) +} + +python buildhistory_list_installed_sdk_target() { + buildhistory_list_installed(d, "sdk_target") +} + +python buildhistory_list_installed_sdk_host() { + buildhistory_list_installed(d, "sdk_host") +} + +buildhistory_get_installed() { + mkdir -p $1 + + # Get list of installed packages + pkgcache="$1/installed-packages.tmp" + cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt + + cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt + if [ -s $pkgcache ] ; then + cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt + else + printf "" > $1/installed-packages.txt + fi + + # Produce dependency graph + # First, quote each name to handle characters that cause issues for dot + sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp && \ + rm ${WORKDIR}/bh_installed_pkgs_deps.txt + # Change delimiter from pipe to -> and set style for recommend lines + sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp + # Add header, sorted and de-duped contents and footer and then delete the temp file + printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot + cat $1/depends.tmp | sort | uniq >> $1/depends.dot + echo "}" >> $1/depends.dot + rm $1/depends.tmp + + # Produce installed package sizes list + printf "" > $1/installed-package-sizes.tmp + cat $pkgcache | while read pkg pkgfile pkgarch + do + size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}` + if [ "$size" != "" ] ; then + echo "$size $pkg" >> $1/installed-package-sizes.tmp + fi + done + cat $1/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > $1/installed-package-sizes.txt + rm $1/installed-package-sizes.tmp + + # We're now done with the cache, delete it + rm $pkgcache + + if [ "$2" != "sdk" ] ; then + # Produce some cut-down graphs (for readability) + grep -v kernel_image $1/depends.dot | grep -v kernel-2 | grep -v kernel-3 > $1/depends-nokernel.dot + grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot + grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot + grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot + fi + + # add 
complementary package information + if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then + cp ${WORKDIR}/complementary_pkgs.txt $1 + fi +} + +buildhistory_get_image_installed() { + # Anything requiring the use of the packaging system should be done in here + # in case the packaging files are going to be removed for this image + + if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then + return + fi + + buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE} +} + +buildhistory_get_sdk_installed() { + # Anything requiring the use of the packaging system should be done in here + # in case the packaging files are going to be removed for this SDK + + if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then + return + fi + + buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk +} + +buildhistory_get_sdk_installed_host() { + buildhistory_get_sdk_installed host +} + +buildhistory_get_sdk_installed_target() { + buildhistory_get_sdk_installed target +} + +buildhistory_list_files() { + # List the files in the specified directory, but exclude date/time etc. + # This awk script is somewhat messy, but handles where the size is not printed for device files under pseudo + ( cd $1 && find . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 ) +} + + +buildhistory_get_imageinfo() { + if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then + return + fi + + buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt + + # Collect files requested in BUILDHISTORY_IMAGE_FILES + rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files + for f in ${BUILDHISTORY_IMAGE_FILES}; do + if [ -f ${IMAGE_ROOTFS}/$f ] ; then + mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f` + cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f + fi + done + + # Record some machine-readable meta-information about the image + printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt + cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt + + # Add some configuration information + echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id.txt + + cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id.txt < ${BUILDHISTORY_DIR_SDK}/sdk-info.txt + cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt +} + +# By prepending we get in before the removal of packaging files +ROOTFS_POSTPROCESS_COMMAND =+ " buildhistory_list_installed_image ;\ + buildhistory_get_image_installed ; " + +IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; " + +# We want these to be the last run so that we get called after complementary package installation +POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target ;\ + buildhistory_get_sdk_installed_target ; " +POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;\ + buildhistory_get_sdk_installed_host ; " + +SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; " + +def buildhistory_get_build_id(d): + if d.getVar('BB_WORKERCONTEXT', True) != '1': + return "" + localdata = bb.data.createCopy(d) + bb.data.update_data(localdata) + statuslines = [] + for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata): + g = globals() + if func not in g: + bb.warn("Build configuration function '%s' does not exist" % func) + else: + flines = g[func](localdata) + if flines: + statuslines.extend(flines) + + 
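buildhistory_get_installed above converts the raw dependency listing into a Graphviz file by quoting each package name, rewriting the '|' delimiter to '->', styling recommends as dotted edges, and de-duplicating the result. A small Python sketch of that transformation, assuming the two-column "pkg|dep [REC]" input format implied by the sed commands:

def deps_to_dot(lines):
    # Input lines look like "busybox|libc6" or "busybox|busybox-syslog [REC]"
    edges = set()
    for line in lines:
        left, right = line.strip().split("|", 1)
        style = ""
        if right.endswith("[REC]"):
            right = right[:-len("[REC]")].strip()
            style = " [style=dotted]"
        edges.add('"%s" -> "%s"%s;' % (left, right, style))
    return ("digraph depends {\n node [shape=plaintext]\n"
            + "\n".join(sorted(edges)) + "\n}")

print(deps_to_dot(["busybox|libc6", "busybox|busybox-syslog [REC]"]))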
statusheader = d.getVar('BUILDCFG_HEADER', True) + return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines))) + +def buildhistory_get_metadata_revs(d): + # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want + layers = (d.getVar("BBLAYERS", True) or "").split() + medadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \ + base_get_metadata_git_branch(i, None).strip(), \ + base_get_metadata_git_revision(i, None)) \ + for i in layers] + return '\n'.join(medadata_revs) + + +def squashspaces(string): + import re + return re.sub("\s+", " ", string).strip() + +def outputvars(vars, listvars, d): + vars = vars.split() + listvars = listvars.split() + ret = "" + for var in vars: + value = d.getVar(var, True) or "" + if var in listvars: + # Squash out spaces + value = squashspaces(value) + ret += "%s = %s\n" % (var, value) + return ret.rstrip('\n') + +def buildhistory_get_imagevars(d): + if d.getVar('BB_WORKERCONTEXT', True) != '1': + return "" + imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND" + listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE" + return outputvars(imagevars, listvars, d) + +def buildhistory_get_sdkvars(d): + if d.getVar('BB_WORKERCONTEXT', True) != '1': + return "" + sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE" + listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE" + return outputvars(sdkvars, listvars, d) + + +def buildhistory_get_cmdline(d): + if sys.argv[0].endswith('bin/bitbake'): + bincmd = 'bitbake' + else: + bincmd = sys.argv[0] + return '%s %s' % (bincmd, ' '.join(sys.argv[1:])) + + +buildhistory_commit() { + if [ ! -d ${BUILDHISTORY_DIR} ] ; then + # Code above that creates this dir never executed, so there can't be anything to commit + return + fi + + # Create a machine-readable list of metadata revisions for each layer + cat > ${BUILDHISTORY_DIR}/metadata-revs < /dev/null 2>&1 || true + git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true + git tag -f build-minus-1 > /dev/null 2>&1 || true + fi + # Check if there are new/changed files to commit (other than metadata-revs) + repostatus=`git status --porcelain | grep -v " metadata-revs$"` + HOSTNAME=`hostname 2>/dev/null || echo unknown` + CMDLINE="${@buildhistory_get_cmdline(d)}" + if [ "$repostatus" != "" ] ; then + git add -A . + # porcelain output looks like "?? 
packages/foo/bar" + # Ensure we commit metadata-revs with the first commit + for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do + git commit $entry metadata-revs -m "$entry: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null + done + git gc --auto --quiet + if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then + git push -q ${BUILDHISTORY_PUSH_REPO} + fi + else + git commit ${BUILDHISTORY_DIR}/ --allow-empty -m "No changes: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null + fi) || true +} + +python buildhistory_eventhandler() { + if e.data.getVar('BUILDHISTORY_FEATURES', True).strip(): + if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1": + bb.note("Writing buildhistory") + bb.build.exec_func("buildhistory_commit", e.data) +} + +addhandler buildhistory_eventhandler +buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted" + + +# FIXME this ought to be moved into the fetcher +def _get_srcrev_values(d): + """ + Return the version strings for the current recipe + """ + + scms = [] + fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d) + urldata = fetcher.ud + for u in urldata: + if urldata[u].method.supports_srcrev(): + scms.append(u) + + autoinc_templ = 'AUTOINC+' + dict_srcrevs = {} + dict_tag_srcrevs = {} + for scm in scms: + ud = urldata[scm] + for name in ud.names: + try: + rev = ud.method.sortable_revision(ud, d, name) + except TypeError: + # support old bitbake versions + rev = ud.method.sortable_revision(scm, ud, d, name) + # Clean this up when we next bump bitbake version + if type(rev) != str: + autoinc, rev = rev + elif rev.startswith(autoinc_templ): + rev = rev[len(autoinc_templ):] + dict_srcrevs[name] = rev + if 'tag' in ud.parm: + tag = ud.parm['tag']; + key = name+'_'+tag + dict_tag_srcrevs[key] = rev + return (dict_srcrevs, dict_tag_srcrevs) + +do_fetch[postfuncs] += "write_srcrev" +do_fetch[vardepsexclude] += "write_srcrev" +python write_srcrev() { + pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True) + srcrevfile = os.path.join(pkghistdir, 'latest_srcrev') + + srcrevs, tag_srcrevs = _get_srcrev_values(d) + if srcrevs: + if not os.path.exists(pkghistdir): + bb.utils.mkdirhier(pkghistdir) + old_tag_srcrevs = {} + if os.path.exists(srcrevfile): + with open(srcrevfile) as f: + for line in f: + if line.startswith('# tag_'): + key, value = line.split("=", 1) + key = key.replace('# tag_', '').strip() + value = value.replace('"', '').strip() + old_tag_srcrevs[key] = value + with open(srcrevfile, 'w') as f: + orig_srcrev = d.getVar('SRCREV', False) or 'INVALID' + if orig_srcrev != 'INVALID': + f.write('# SRCREV = "%s"\n' % orig_srcrev) + if len(srcrevs) > 1: + for name, srcrev in srcrevs.items(): + orig_srcrev = d.getVar('SRCREV_%s' % name, False) + if orig_srcrev: + f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev)) + f.write('SRCREV_%s = "%s"\n' % (name, srcrev)) + else: + f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next()) + if len(tag_srcrevs) > 0: + for name, srcrev in tag_srcrevs.items(): + f.write('# tag_%s = "%s"\n' % (name, srcrev)) + if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev: + pkg = d.getVar('PN', True) + bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev)) + + else: + if 
os.path.exists(srcrevfile): + os.remove(srcrevfile) +} diff --git a/meta/classes/buildstats-summary.bbclass b/meta/classes/buildstats-summary.bbclass new file mode 100644 index 0000000000..c8fbb2f1a1 --- /dev/null +++ b/meta/classes/buildstats-summary.bbclass @@ -0,0 +1,39 @@ +# Summarize sstate usage at the end of the build +python buildstats_summary () { + if not isinstance(e, bb.event.BuildCompleted): + return + + import collections + import os.path + + bn = get_bn(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + if not os.path.exists(bsdir): + return + + sstatetasks = (e.data.getVar('SSTATETASKS', True) or '').split() + built = collections.defaultdict(lambda: [set(), set()]) + for pf in os.listdir(bsdir): + taskdir = os.path.join(bsdir, pf) + if not os.path.isdir(taskdir): + continue + + tasks = os.listdir(taskdir) + for t in sstatetasks: + no_sstate, sstate = built[t] + if t in tasks: + no_sstate.add(pf) + elif t + '_setscene' in tasks: + sstate.add(pf) + + header_printed = False + for t in sstatetasks: + no_sstate, sstate = built[t] + if no_sstate | sstate: + if not header_printed: + header_printed = True + bb.note("Build completion summary:") + + bb.note(" {0}: {1}% sstate reuse ({2} setscene, {3} scratch)".format(t, 100*len(sstate)/(len(sstate)+len(no_sstate)), len(sstate), len(no_sstate))) +} +addhandler buildstats_summary diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass new file mode 100644 index 0000000000..89ae72c679 --- /dev/null +++ b/meta/classes/buildstats.bbclass @@ -0,0 +1,289 @@ +BUILDSTATS_BASE = "${TMPDIR}/buildstats/" +BNFILE = "${BUILDSTATS_BASE}/.buildname" +DEVFILE = "${BUILDSTATS_BASE}/.device" + +################################################################################ +# Build statistics gathering. +# +# The CPU and Time gathering/tracking functions and bbevent inspiration +# were written by Christopher Larson and can be seen here: +# http://kergoth.pastey.net/142813 +# +################################################################################ + +def get_process_cputime(pid): + with open("/proc/%d/stat" % pid, "r") as f: + fields = f.readline().rstrip().split() + # 13: utime, 14: stime, 15: cutime, 16: cstime + return sum(int(field) for field in fields[13:16]) + +def get_cputime(): + with open("/proc/stat", "r") as f: + fields = f.readline().rstrip().split()[1:] + return sum(int(field) for field in fields) + +def set_bn(e): + bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True) + try: + os.remove(e.data.getVar('BNFILE', True)) + except: + pass + with open(e.data.getVar('BNFILE', True), "w") as f: + f.write(os.path.join(bn, e.data.getVar('BUILDNAME', True))) + +def get_bn(e): + with open(e.data.getVar('BNFILE', True)) as f: + bn = f.readline() + return bn + +def set_device(e): + tmpdir = e.data.getVar('TMPDIR', True) + try: + os.remove(e.data.getVar('DEVFILE', True)) + except: + pass + ############################################################################ + # We look for the volume TMPDIR lives on. To do all disks would make little + # sense and not give us any particularly useful data. In theory we could do + # something like stick DL_DIR on a different partition and this would + # throw stats gathering off. The same goes with SSTATE_DIR. However, let's + # get the basics in here and work on the cornercases later. + # A note. /proc/diskstats does not contain info on encryptfs, tmpfs, etc. + # If we end up hitting one of these fs, we'll just skip diskstats collection. 
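buildstats-summary.bbclass above walks the per-recipe buildstats directories and, for every sstate-capable task, counts how many recipes ran the real task versus its _setscene variant. The bookkeeping boils down to roughly this (plain Python; the directory listings in the example are hypothetical):

import collections

def sstate_summary(sstatetasks, recipe_tasks):
    # recipe_tasks maps a recipe (PF) to the task log names found in its
    # buildstats directory, e.g. {"zlib-1.2.8-r0": ["do_package_setscene"]}
    built = collections.defaultdict(lambda: [set(), set()])
    for pf, tasks in recipe_tasks.items():
        for t in sstatetasks:
            no_sstate, sstate = built[t]
            if t in tasks:
                no_sstate.add(pf)
            elif t + "_setscene" in tasks:
                sstate.add(pf)
    for t in sstatetasks:
        no_sstate, sstate = built[t]
        total = len(sstate) + len(no_sstate)
        if total:
            print("  %s: %d%% sstate reuse (%d setscene, %d scratch)"
                  % (t, 100 * len(sstate) // total, len(sstate), len(no_sstate)))

sstate_summary(["do_package"],
               {"zlib-1.2.8-r0": ["do_package_setscene"], "foo-1.0-r0": ["do_package"]})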
+ ############################################################################ + device=os.stat(tmpdir) + majordev=os.major(device.st_dev) + minordev=os.minor(device.st_dev) + ############################################################################ + # Bug 1700: + # Because tmpfs/encryptfs/ramfs etc inserts no entry in /proc/diskstats + # we set rdev to NoLogicalDevice and search for it later. If we find NLD + # we do not collect diskstats as the method to collect meaningful statistics + # for these fs types requires a bit more research. + ############################################################################ + rdev="NoLogicalDevice" + try: + with open("/proc/diskstats", "r") as f: + for line in f: + if majordev == int(line.split()[0]) and minordev == int(line.split()[1]): + rdev=line.split()[2] + except: + pass + file = open(e.data.getVar('DEVFILE', True), "w") + file.write(rdev) + file.close() + +def get_device(e): + file = open(e.data.getVar('DEVFILE', True)) + device = file.readline() + file.close() + return device + +def get_diskstats(dev): + import itertools + ############################################################################ + # For info on what these are, see kernel doc file iostats.txt + ############################################################################ + DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO'] + try: + with open("/proc/diskstats", "r") as f: + for x in f: + if dev in x: + diskstats_val = x.rstrip().split()[4:] + except IOError as e: + return + diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val)) + return diskstats + +def set_diskdata(var, dev, data): + data.setVar(var, get_diskstats(dev)) + +def get_diskdata(var, dev, data): + olddiskdata = data.getVar(var, False) + diskdata = {} + if olddiskdata is None: + return + newdiskdata = get_diskstats(dev) + for key in olddiskdata.iterkeys(): + diskdata["Start"+key] = str(int(olddiskdata[key])) + diskdata["End"+key] = str(int(newdiskdata[key])) + return diskdata + +def set_timedata(var, data, server_time=None): + import time + if server_time: + time = server_time + else: + time = time.time() + cputime = get_cputime() + proctime = get_process_cputime(os.getpid()) + data.setVar(var, (time, cputime, proctime)) + +def get_timedata(var, data, server_time=None): + import time + timedata = data.getVar(var, False) + if timedata is None: + return + oldtime, oldcpu, oldproc = timedata + procdiff = get_process_cputime(os.getpid()) - oldproc + cpudiff = get_cputime() - oldcpu + if server_time: + end_time = server_time + else: + end_time = time.time() + timediff = end_time - oldtime + if cpudiff > 0: + cpuperc = float(procdiff) * 100 / cpudiff + else: + cpuperc = None + return timediff, cpuperc + +def write_task_data(status, logfile, dev, e): + bn = get_bn(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + taskdir = os.path.join(bsdir, e.data.expand("${PF}")) + file = open(os.path.join(logfile), "a") + timedata = get_timedata("__timedata_task", e.data, e.time) + if timedata: + elapsedtime, cpu = timedata + file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" % + (e.task, elapsedtime), e.data)) + if cpu: + file.write("CPU usage: %0.1f%% \n" % cpu) + ############################################################################ + # Here we gather up disk data. In an effort to avoid lying with stats + # I do a bare minimum of analysis of collected data. 
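get_timedata() above turns the jiffy counters into a CPU usage figure by comparing how many jiffies the current process (plus reaped children) consumed against how many the whole system ticked over in the same interval. Taken in isolation, the arithmetic is just:

def cpu_percent(proc_start, proc_end, sys_start, sys_end):
    # proc_* are jiffies from /proc/<pid>/stat (utime+stime+cutime+cstime),
    # sys_* are the summed counters from the first line of /proc/stat.
    sys_diff = sys_end - sys_start
    if sys_diff <= 0:
        return None
    return float(proc_end - proc_start) * 100 / sys_diff

# The task burned 450 jiffies while the whole system ticked 3600 of them:
print(cpu_percent(1000, 1450, 200000, 203600))  # 12.5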
+ # The simple fact is, doing disk io collection on a per process basis + # without effecting build time would be difficult. + # For the best information, running things with BB_TOTAL_THREADS = "1" + # would return accurate per task results. + ############################################################################ + if dev != "NoLogicalDevice": + diskdata = get_diskdata("__diskdata_task", dev, e.data) + if diskdata: + for key in sorted(diskdata.iterkeys()): + file.write(key + ": " + diskdata[key] + "\n") + if status is "passed": + file.write("Status: PASSED \n") + else: + file.write("Status: FAILED \n") + file.write("Ended: %0.2f \n" % e.time) + file.close() + +python run_buildstats () { + import bb.build + import bb.event + import bb.data + import time, subprocess, platform + + if isinstance(e, bb.event.BuildStarted): + ######################################################################## + # at first pass make the buildstats heriarchy and then + # set the buildname + ######################################################################## + try: + bb.utils.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True)) + except: + pass + set_bn(e) + bn = get_bn(e) + set_device(e) + device = get_device(e) + + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + try: + bb.utils.mkdirhier(bsdir) + except: + pass + if device != "NoLogicalDevice": + set_diskdata("__diskdata_build", device, e.data) + set_timedata("__timedata_build", e.data) + build_time = os.path.join(bsdir, "build_stats") + # write start of build into build_time + file = open(build_time,"a") + host_info = platform.uname() + file.write("Host Info: ") + for x in host_info: + if x: + file.write(x + " ") + file.write("\n") + file.write("Build Started: %0.2f \n" % time.time()) + file.close() + + elif isinstance(e, bb.event.BuildCompleted): + bn = get_bn(e) + device = get_device(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + taskdir = os.path.join(bsdir, e.data.expand("${PF}")) + build_time = os.path.join(bsdir, "build_stats") + file = open(build_time, "a") + ######################################################################## + # Write build statistics for the build + ######################################################################## + timedata = get_timedata("__timedata_build", e.data) + if timedata: + time, cpu = timedata + # write end of build and cpu used into build_time + file.write("Elapsed time: %0.2f seconds \n" % (time)) + if cpu: + file.write("CPU usage: %0.1f%% \n" % cpu) + if device != "NoLogicalDevice": + diskio = get_diskdata("__diskdata_build", device, e.data) + if diskio: + for key in sorted(diskio.iterkeys()): + file.write(key + ": " + diskio[key] + "\n") + file.close() + + if isinstance(e, bb.build.TaskStarted): + bn = get_bn(e) + device = get_device(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + taskdir = os.path.join(bsdir, e.data.expand("${PF}")) + if device != "NoLogicalDevice": + set_diskdata("__diskdata_task", device, e.data) + set_timedata("__timedata_task", e.data, e.time) + try: + bb.utils.mkdirhier(taskdir) + except: + pass + # write into the task event file the name and start time + file = open(os.path.join(taskdir, e.task), "a") + file.write("Event: %s \n" % bb.event.getName(e)) + file.write("Started: %0.2f \n" % e.time) + file.close() + + elif isinstance(e, bb.build.TaskSucceeded): + bn = get_bn(e) + device = get_device(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + taskdir = os.path.join(bsdir, 
e.data.expand("${PF}")) + write_task_data("passed", os.path.join(taskdir, e.task), device, e) + if e.task == "do_rootfs": + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + bs=os.path.join(bsdir, "build_stats") + file = open(bs,"a") + rootfs = e.data.getVar('IMAGE_ROOTFS', True) + rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read() + file.write("Uncompressed Rootfs size: %s" % rootfs_size) + file.close() + + elif isinstance(e, bb.build.TaskFailed): + bn = get_bn(e) + device = get_device(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + taskdir = os.path.join(bsdir, e.data.expand("${PF}")) + write_task_data("failed", os.path.join(taskdir, e.task), device, e) + ######################################################################## + # Lets make things easier and tell people where the build failed in + # build_status. We do this here because BuildCompleted triggers no + # matter what the status of the build actually is + ######################################################################## + build_status = os.path.join(bsdir, "build_stats") + file = open(build_status,"a") + file.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task)) + file.close() +} + +addhandler run_buildstats +run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed" + diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass new file mode 100644 index 0000000000..2cdce46932 --- /dev/null +++ b/meta/classes/ccache.bbclass @@ -0,0 +1,8 @@ +CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}" +export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}" +CCACHE_DISABLE[unexport] = "1" + +do_configure[dirs] =+ "${CCACHE_DIR}" +do_kernel_configme[dirs] =+ "${CCACHE_DIR}" + +do_clean[cleandirs] += "${CCACHE_DIR}" diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass new file mode 100644 index 0000000000..77b19372ba --- /dev/null +++ b/meta/classes/chrpath.bbclass @@ -0,0 +1,115 @@ +CHRPATH_BIN ?= "chrpath" +PREPROCESS_RELOCATE_DIRS ?= "" + +def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d): + import subprocess as sub + + p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE) + err, out = p.communicate() + # If returned succesfully, process stderr for results + if p.returncode != 0: + return + + # Throw away everything other than the rpath list + curr_rpath = err.partition("RPATH=")[2] + #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip())) + rpaths = curr_rpath.split(":") + new_rpaths = [] + modified = False + for rpath in rpaths: + # If rpath is already dynamic copy it to new_rpath and continue + if rpath.find("$ORIGIN") != -1: + new_rpaths.append(rpath.strip()) + continue + rpath = os.path.normpath(rpath) + if baseprefix not in rpath and tmpdir not in rpath: + new_rpaths.append(rpath.strip()) + continue + new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/")))) + modified = True + + # if we have modified some rpaths call chrpath to update the binary + if modified: + args = ":".join(new_rpaths) + #bb.note("Setting rpath for %s to %s" %(fpath, args)) + p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE) + out, err = p.communicate() + if p.returncode != 0: + bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err)) + raise 
bb.build.FuncFailed + +def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d): + import subprocess as sub + + p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE) + err, out = p.communicate() + # If returned succesfully, process stderr for results + if p.returncode != 0: + return + for l in err.split("\n"): + if "(compatibility" not in l: + continue + rpath = l.partition("(compatibility")[0].strip() + if baseprefix not in rpath: + continue + + newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/"))) + p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE) + err, out = p.communicate() + +def process_dir (rootdir, directory, d): + import stat + + rootdir = os.path.normpath(rootdir) + cmd = d.expand('${CHRPATH_BIN}') + tmpdir = os.path.normpath(d.getVar('TMPDIR')) + baseprefix = os.path.normpath(d.expand('${base_prefix}')) + hostos = d.getVar("HOST_OS", True) + + #bb.debug("Checking %s for binaries to process" % directory) + if not os.path.exists(directory): + return + + if "linux" in hostos: + process_file = process_file_linux + elif "darwin" in hostos: + process_file = process_file_darwin + else: + # Relocations not supported + return + + dirs = os.listdir(directory) + for file in dirs: + fpath = directory + "/" + file + fpath = os.path.normpath(fpath) + if os.path.islink(fpath): + # Skip symlinks + continue + + if os.path.isdir(fpath): + process_dir(rootdir, fpath, d) + else: + #bb.note("Testing %s for relocatability" % fpath) + + # We need read and write permissions for chrpath, if we don't have + # them then set them temporarily. Take a copy of the files + # permissions so that we can restore them afterwards. + perms = os.stat(fpath)[stat.ST_MODE] + if os.access(fpath, os.W_OK|os.R_OK): + perms = None + else: + # Temporarily make the file writeable so we can chrpath it + os.chmod(fpath, perms|stat.S_IRWXU) + process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d) + + if perms: + os.chmod(fpath, perms) + +def rpath_replace (path, d): + bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split() + + for bindir in bindirs: + #bb.note ("Processing directory " + bindir) + directory = path + "/" + bindir + process_dir (path, directory, d) + diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass new file mode 100644 index 0000000000..167407dfdc --- /dev/null +++ b/meta/classes/clutter.bbclass @@ -0,0 +1,22 @@ + +def get_minor_dir(v): + import re + m = re.match("^([0-9]+)\.([0-9]+)", v) + return "%s.%s" % (m.group(1), m.group(2)) + +def get_real_name(n): + import re + m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n) + return "%s" % (m.group(1)) + +VERMINOR = "${@get_minor_dir("${PV}")}" +REALNAME = "${@get_real_name("${BPN}")}" + +CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive" + +CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}" + +SRC_URI = "${CLUTTER_SRC_FTP}" +S = "${WORKDIR}/${REALNAME}-${PV}" + +inherit autotools pkgconfig gtk-doc gettext diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass new file mode 100644 index 0000000000..995ddf1ea2 --- /dev/null +++ b/meta/classes/cmake.bbclass @@ -0,0 +1,121 @@ +# Path to the CMake file to process. 
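# Recipes whose top-level CMakeLists.txt does not sit directly in ${S} can
# override this default, e.g. (subdirectory name purely illustrative):
#   OECMAKE_SOURCEPATH = "${S}/cmake-project"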
+OECMAKE_SOURCEPATH ?= "${S}" + +DEPENDS_prepend = "cmake-native " +B = "${WORKDIR}/build" + +# We need to unset CCACHE otherwise cmake gets too confused +CCACHE = "" + +# We want the staging and installing functions from autotools +inherit autotools + +# C/C++ Compiler (without cpu arch/tune arguments) +OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`" +OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`" + +# Compiler flags +OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}" +OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}" +OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG" +OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG" +OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}" +OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}" + +OECMAKE_RPATH ?= "" +OECMAKE_PERLNATIVE_DIR ??= "" +OECMAKE_EXTRA_ROOT_PATH ?= "" + +cmake_do_generate_toolchain_file() { + cat > ${WORKDIR}/toolchain.cmake < mtime: + bb.note("Configuration changed, recompile will be forced") + bb.build.write_taint('do_compile', d) +} +do_menuconfig[depends] += "ncurses-native:do_populate_sysroot" +do_menuconfig[nostamp] = "1" +addtask menuconfig after do_configure + +python do_diffconfig() { + import shutil + import subprocess + + workdir = d.getVar('WORKDIR', True) + fragment = workdir + '/fragment.cfg' + configorig = '.config.orig' + config = '.config' + + try: + md5newconfig = bb.utils.md5_file(configorig) + md5config = bb.utils.md5_file(config) + isdiff = md5newconfig != md5config + except IOError as e: + bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e) + + if isdiff: + statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment + subprocess.call(statement, shell=True) + + shutil.copy(configorig, config) + + bb.plain("Config fragment has been dumped into:\n %s" % fragment) + else: + if os.path.exists(fragment): + os.unlink(fragment) +} + +do_diffconfig[nostamp] = "1" +addtask diffconfig diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass new file mode 100644 index 0000000000..6edbaf531f --- /dev/null +++ b/meta/classes/compress_doc.bbclass @@ -0,0 +1,256 @@ +# Compress man pages in ${mandir} and info pages in ${infodir} +# +# 1. The doc will be compressed to gz format by default. +# +# 2. It will automatically correct the compressed doc which is not +# in ${DOC_COMPRESS} but in ${DOC_COMPRESS_LIST} to the format +# of ${DOC_COMPRESS} policy +# +# 3. 
It is easy to add a new type compression by editing +# local.conf, such as: +# DOC_COMPRESS_LIST_append = ' abc' +# DOC_COMPRESS = 'abc' +# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***' +# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***' + +# All supported compression policy +DOC_COMPRESS_LIST ?= "gz xz bz2" + +# Compression policy, must be one of ${DOC_COMPRESS_LIST} +DOC_COMPRESS ?= "gz" + +# Compression shell command +DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n' +DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9" +DOC_COMPRESS_CMD[xz] ?= "xz -v" + +# Decompression shell command +DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v' +DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v" +DOC_DECOMPRESS_CMD[xz] ?= "unxz -v" + +PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives" +python package_do_compress_doc() { + compress_mode = d.getVar('DOC_COMPRESS', True) + compress_list = (d.getVar('DOC_COMPRESS_LIST', True) or '').split() + if compress_mode not in compress_list: + bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list)) + + dvar = d.getVar('PKGD', True) + compress_cmds = {} + decompress_cmds = {} + for mode in compress_list: + compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode) + decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode) + + mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True)) + if os.path.exists(mandir): + # Decompress doc files which format is not compress_mode + decompress_doc(mandir, compress_mode, decompress_cmds) + compress_doc(mandir, compress_mode, compress_cmds) + + infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir", True)) + if os.path.exists(infodir): + # Decompress doc files which format is not compress_mode + decompress_doc(infodir, compress_mode, decompress_cmds) + compress_doc(infodir, compress_mode, compress_cmds) +} + +def _get_compress_format(file, compress_format_list): + for compress_format in compress_format_list: + compress_suffix = '.' + compress_format + if file.endswith(compress_suffix): + return compress_format + + return '' + +# Collect hardlinks to dict, each element in dict lists hardlinks +# which points to the same doc file. +# {hardlink10: [hardlink11, hardlink12],,,} +# The hardlink10, hardlink11 and hardlink12 are the same file. +def _collect_hardlink(hardlink_dict, file): + for hardlink in hardlink_dict: + # Add to the existed hardlink + if os.path.samefile(hardlink, file): + hardlink_dict[hardlink].append(file) + return hardlink_dict + + hardlink_dict[file] = [] + return hardlink_dict + +def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False): + for target in hardlink_dict: + if decompress: + compress_format = _get_compress_format(target, shell_cmds.keys()) + cmd = "%s -f %s" % (shell_cmds[compress_format], target) + bb.note('decompress hardlink %s' % target) + else: + cmd = "%s -f %s" % (shell_cmds[compress_mode], target) + bb.note('compress hardlink %s' % target) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + return + + for hardlink_dup in hardlink_dict[target]: + if decompress: + # Remove compress suffix + compress_suffix = '.' + compress_format + new_hardlink = hardlink_dup[:-len(compress_suffix)] + new_target = target[:-len(compress_suffix)] + else: + # Append compress suffix + compress_suffix = '.' 
+ compress_mode + new_hardlink = hardlink_dup + compress_suffix + new_target = target + compress_suffix + + bb.note('hardlink %s-->%s' % (new_hardlink, new_target)) + if not os.path.exists(new_hardlink): + os.link(new_target, new_hardlink) + if os.path.exists(hardlink_dup): + os.unlink(hardlink_dup) + +def _process_symlink(file, compress_format, decompress=False): + compress_suffix = '.' + compress_format + if decompress: + # Remove compress suffix + new_linkname = file[:-len(compress_suffix)] + new_source = os.readlink(file)[:-len(compress_suffix)] + else: + # Append compress suffix + new_linkname = file + compress_suffix + new_source = os.readlink(file) + compress_suffix + + bb.note('symlink %s-->%s' % (new_linkname, new_source)) + if not os.path.exists(new_linkname): + os.symlink(new_source, new_linkname) + + os.unlink(file) + +def _is_info(file): + flags = '.info .info-'.split() + for flag in flags: + if flag in os.path.basename(file): + return True + + return False + +def _is_man(file): + # It refers MANSECT-var in man(1.6g)'s man.config + flags = '.1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o'.split(':') + for flag in flags: + if os.path.basename(file).endswith(flag): + return True + + return False + +def _is_compress_doc(file, compress_format_list): + compress_format = _get_compress_format(file, compress_format_list) + compress_suffix = '.' + compress_format + if file.endswith(compress_suffix): + # Remove the compress suffix + uncompress_file = file[:-len(compress_suffix)] + if _is_info(uncompress_file) or _is_man(uncompress_file): + return True, compress_format + + return False, '' + +def compress_doc(topdir, compress_mode, compress_cmds): + hardlink_dict = {} + for root, dirs, files in os.walk(topdir): + for f in files: + file = os.path.join(root, f) + if os.path.isdir(file): + continue + + if _is_info(file) or _is_man(file): + # Symlink + if os.path.islink(file): + _process_symlink(file, compress_mode) + # Hardlink + elif os.lstat(file).st_nlink > 1: + _collect_hardlink(hardlink_dict, file) + # Normal file + elif os.path.isfile(file): + cmd = "%s %s" % (compress_cmds[compress_mode], file) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + continue + bb.note('compress file %s' % file) + + _process_hardlink(hardlink_dict, compress_mode, compress_cmds) + +# Decompress doc files which format is not compress_mode +def decompress_doc(topdir, compress_mode, decompress_cmds): + hardlink_dict = {} + decompress = True + for root, dirs, files in os.walk(topdir): + for f in files: + file = os.path.join(root, f) + if os.path.isdir(file): + continue + + res, compress_format = _is_compress_doc(file, decompress_cmds.keys()) + # Decompress files which format is not compress_mode + if res and compress_mode!=compress_format: + # Symlink + if os.path.islink(file): + _process_symlink(file, compress_format, decompress) + # Hardlink + elif os.lstat(file).st_nlink > 1: + _collect_hardlink(hardlink_dict, file) + # Normal file + elif os.path.isfile(file): + cmd = "%s %s" % (decompress_cmds[compress_format], file) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + continue + bb.note('decompress file %s' % file) + + _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress) + +python compress_doc_updatealternatives () { + if not 
bb.data.inherits_class('update-alternatives', d): + return + + mandir = d.getVar("mandir", True) + infodir = d.getVar("infodir", True) + compress_mode = d.getVar('DOC_COMPRESS', True) + for pkg in (d.getVar('PACKAGES', True) or "").split(): + old_names = (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split() + new_names = [] + for old_name in old_names: + old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name, True) + old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True) or \ + d.getVarFlag('ALTERNATIVE_TARGET', old_name, True) or \ + d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or \ + d.getVar('ALTERNATIVE_TARGET', True) or \ + old_link + # Sometimes old_target is specified as relative to the link name. + old_target = os.path.join(os.path.dirname(old_link), old_target) + + # The updatealternatives used for compress doc + if mandir in old_target or infodir in old_target: + new_name = old_name + '.' + compress_mode + new_link = old_link + '.' + compress_mode + new_target = old_target + '.' + compress_mode + d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name) + d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link) + if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True): + d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) + d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target) + elif d.getVarFlag('ALTERNATIVE_TARGET', old_name, True): + d.delVarFlag('ALTERNATIVE_TARGET', old_name) + d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target) + elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True): + d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target) + elif d.getVar('ALTERNATIVE_TARGET', old_name, True): + d.setVar('ALTERNATIVE_TARGET', new_target) + + new_names.append(new_name) + + if new_names: + d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names)) +} diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass new file mode 100644 index 0000000000..907c1836b3 --- /dev/null +++ b/meta/classes/copyleft_compliance.bbclass @@ -0,0 +1,64 @@ +# Deploy sources for recipes for compliance with copyleft-style licenses +# Defaults to using symlinks, as it's a quick operation, and one can easily +# follow the links when making use of the files (e.g. tar with the -h arg). 
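# A sketch of how the deployed tree might be archived with the symlinks
# dereferenced (paths assume the default COPYLEFT_SOURCES_DIR set below):
#   tar -chzf copyleft-sources.tar.gz -C ${DEPLOY_DIR}/copyleft_sources .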
+# +# vi:sts=4:sw=4:et + +inherit copyleft_filter + +COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources' + +python do_prepare_copyleft_sources () { + """Populate a tree of the recipe sources and emit patch series files""" + import os.path + import shutil + + p = d.getVar('P', True) + included, reason = copyleft_should_include(d) + if not included: + bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason)) + return + else: + bb.debug(1, 'copyleft: %s is included: %s' % (p, reason)) + + sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True) + dl_dir = d.getVar('DL_DIR', True) + src_uri = d.getVar('SRC_URI', True).split() + fetch = bb.fetch2.Fetch(src_uri, d) + ud = fetch.ud + + pf = d.getVar('PF', True) + dest = os.path.join(sources_dir, pf) + shutil.rmtree(dest, ignore_errors=True) + bb.utils.mkdirhier(dest) + + for u in ud.values(): + local = os.path.normpath(fetch.localpath(u.url)) + if local.endswith('.bb'): + continue + elif local.endswith('/'): + local = local[:-1] + + if u.mirrortarball: + tarball_path = os.path.join(dl_dir, u.mirrortarball) + if os.path.exists(tarball_path): + local = tarball_path + + oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True) + + patches = src_patches(d) + for patch in patches: + _, _, local, _, _, parm = bb.fetch.decodeurl(patch) + patchdir = parm.get('patchdir') + if patchdir: + series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_')) + else: + series = os.path.join(dest, 'series') + + with open(series, 'a') as s: + s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel'])) +} + +addtask prepare_copyleft_sources after do_fetch before do_build +do_prepare_copyleft_sources[dirs] = "${WORKDIR}" +do_build[recrdeptask] += 'do_prepare_copyleft_sources' diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass new file mode 100644 index 0000000000..2c1d8f1c90 --- /dev/null +++ b/meta/classes/copyleft_filter.bbclass @@ -0,0 +1,62 @@ +# Filter the license, the copyleft_should_include returns True for the +# COPYLEFT_LICENSE_INCLUDE recipe, and False for the +# COPYLEFT_LICENSE_EXCLUDE. +# +# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary. +# +# vi:sts=4:sw=4:et + +COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*' +COPYLEFT_LICENSE_INCLUDE[type] = 'list' +COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses' + +COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary' +COPYLEFT_LICENSE_EXCLUDE[type] = 'list' +COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses' + +COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}' +COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. 
target, native, cross)' + +COPYLEFT_RECIPE_TYPES ?= 'target' +COPYLEFT_RECIPE_TYPES[type] = 'list' +COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include' + +COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian' +COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list' +COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types' + +def copyleft_recipe_type(d): + for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d): + if oe.utils.inherits(d, recipe_type): + return recipe_type + return 'target' + +def copyleft_should_include(d): + """ + Determine if this recipe's sources should be deployed for compliance + """ + import ast + import oe.license + from fnmatch import fnmatchcase as fnmatch + + recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True) + if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d): + return False, 'recipe type "%s" is excluded' % recipe_type + + include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d) + exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d) + + try: + is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude) + except oe.license.LicenseError as exc: + bb.fatal('%s: %s' % (d.getVar('PF', True), exc)) + else: + if is_included: + if reason: + return True, 'recipe has included licenses: %s' % ', '.join(reason) + else: + return False, 'recipe does not include a copyleft license' + else: + return False, 'recipe has excluded licenses: %s' % ', '.join(reason) + + diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass new file mode 100644 index 0000000000..62363fb334 --- /dev/null +++ b/meta/classes/core-image.bbclass @@ -0,0 +1,80 @@ +# Common code for generating core reference images +# +# Copyright (C) 2007-2011 Linux Foundation + +LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d690 \ + file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420" + +# IMAGE_FEATURES control content of the core reference images +# +# By default we install packagegroup-core-boot and packagegroup-base-extended packages; +# this gives us working (console only) rootfs. +# +# Available IMAGE_FEATURES: +# +# - x11 - X server +# - x11-base - X server with minimal environment +# - x11-sato - OpenedHand Sato environment +# - tools-debug - debugging tools +# - eclipse-debug - Eclipse remote debugging support +# - tools-profile - profiling tools +# - tools-testapps - tools usable to make some device tests +# - tools-sdk - SDK (C/C++ compiler, autotools, etc.) +# - nfs-server - NFS server +# - ssh-server-dropbear - SSH server (dropbear) +# - ssh-server-openssh - SSH server (openssh) +# - qt4-pkgs - Qt4/X11 and demo applications +# - hwcodecs - Install hardware acceleration codecs +# - package-management - installs package management tools and preserves the package manager database +# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins +# - dev-pkgs - development packages (headers, etc.) 
for all installed packages in the rootfs +# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs +# - doc-pkgs - documentation packages for all installed packages in the rootfs +# - ptest-pkgs - ptest packages for all ptest-enabled recipes +# - read-only-rootfs - tweaks an image to support read-only rootfs +# +FEATURE_PACKAGES_x11 = "packagegroup-core-x11" +FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base" +FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato" +FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug" +FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug" +FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile" +FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps" +FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target" +FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server" +FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear" +FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh" +FEATURE_PACKAGES_qt4-pkgs = "packagegroup-core-qt-demoapps" +FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}" + + +# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2' +# Including image feature foo would replace the image features bar1 and bar2 +IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear" + +# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2' +# An error exception would be raised if both image features foo and bar1(or bar2) are included + +MACHINE_HWCODECS ??= "" + +CORE_IMAGE_BASE_INSTALL = '\ + packagegroup-core-boot \ + packagegroup-base-extended \ + \ + ${CORE_IMAGE_EXTRA_INSTALL} \ + ' + +CORE_IMAGE_EXTRA_INSTALL ?= "" + +IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}" + +inherit image + +# Create /etc/timestamp during image construction to give a reasonably sane default time setting +ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; " + +# Zap the root password if debug-tweaks feature is not enabled +ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_empty_root_password ; ",d)}' + +# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled +ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}' diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass new file mode 100644 index 0000000000..d9817ba6b6 --- /dev/null +++ b/meta/classes/cpan-base.bbclass @@ -0,0 +1,55 @@ +# +# cpan-base providers various perl related information needed for building +# cpan modules +# +FILES_${PN} += "${libdir}/perl ${datadir}/perl" + +DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}" +RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}" + +PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}" + +# Determine the staged version of perl from the perl configuration file +# Assign vardepvalue, because otherwise signature is changed before and after +# perl is built (from None to real version in config.sh). 
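# For reference, config.sh is expected to carry a line of the form shown below
# (the version number is only an example); the regex in get_perl_version()
# extracts the quoted value:
#   version='5.20.0'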
+get_perl_version[vardepvalue] = "${PERL_OWN_DIR}" +def get_perl_version(d): + import re + cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh') + try: + f = open(cfg, 'r') + except IOError: + return None + l = f.readlines(); + f.close(); + r = re.compile("^version='(\d*\.\d*\.\d*)'") + for s in l: + m = r.match(s) + if m: + return m.group(1) + return None + +# Determine where the library directories are +def perl_get_libdirs(d): + libdir = d.getVar('libdir', True) + if is_target(d) == "no": + libdir += '/perl-native' + libdir += '/perl' + return libdir + +def is_target(d): + if not bb.data.inherits_class('native', d): + return "yes" + return "no" + +PERLLIBDIRS := "${@perl_get_libdirs(d)}" +PERLVERSION := "${@get_perl_version(d)}" +PERLVERSION[vardepvalue] = "" + +FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \ + ${PERLLIBDIRS}/auto/*/*/.debug \ + ${PERLLIBDIRS}/auto/*/*/*/.debug \ + ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/.debug \ + ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/.debug \ + ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/.debug \ + " diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass new file mode 100644 index 0000000000..e2bbd2f63a --- /dev/null +++ b/meta/classes/cpan.bbclass @@ -0,0 +1,55 @@ +# +# This is for perl modules that use the old Makefile.PL build system +# +inherit cpan-base perlnative + +EXTRA_CPANFLAGS ?= "" +EXTRA_PERLFLAGS ?= "" + +# Env var which tells perl if it should use host (no) or target (yes) settings +export PERLCONFIGTARGET = "${@is_target(d)}" + +# Env var which tells perl where the perl include files are +export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE" +export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}" +export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}" +export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/" + +cpan_do_configure () { + export PERL5LIB="${PERL_ARCHLIB}" + yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS} + + # Makefile.PLs can exit with success without generating a + # Makefile, e.g. in cases of missing configure time + # dependencies. This is considered a best practice by + # cpantesters.org. See: + # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes + # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html + [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL" + + if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then + . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh + # Use find since there can be a Makefile generated for each Makefile.PL + for f in `find -name Makefile.PL`; do + f2=`echo $f | sed -e 's/.PL//'` + test -f $f2 || continue + sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \ + -e 's/perl.real/perl/' \ + -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \ + $f2 + done + fi +} + +cpan_do_compile () { + oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}" +} + +cpan_do_install () { + oe_runmake DESTDIR="${D}" install_vendor + for PERLSCRIPT in `grep -rIEl '#! 
*${bindir}/perl-native.*/perl' ${D}`; do + sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT + done +} + +EXPORT_FUNCTIONS do_configure do_compile do_install diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass new file mode 100644 index 0000000000..2eb8162314 --- /dev/null +++ b/meta/classes/cpan_build.bbclass @@ -0,0 +1,53 @@ +# +# This is for perl modules that use the new Build.PL build system +# +inherit cpan-base perlnative + +EXTRA_CPAN_BUILD_FLAGS ?= "" + +# Env var which tells perl if it should use host (no) or target (yes) settings +export PERLCONFIGTARGET = "${@is_target(d)}" +export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}" +export LD = "${CCLD}" + +# +# We also need to have built libmodule-build-perl-native for +# everything except libmodule-build-perl-native itself (which uses +# this class, but uses itself as the provider of +# libmodule-build-perl) +# +def cpan_build_dep_prepend(d): + if d.getVar('CPAN_BUILD_DEPS', True): + return '' + pn = d.getVar('PN', True) + if pn in ['libmodule-build-perl', 'libmodule-build-perl-native']: + return '' + return 'libmodule-build-perl-native ' + +DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}" + +cpan_build_do_configure () { + if [ "${@is_target(d)}" = "yes" ]; then + # build for target + . ${STAGING_LIBDIR}/perl/config.sh + fi + + perl Build.PL --installdirs vendor \ + --destdir ${D} \ + --install_path arch="${libdir}/perl" \ + --install_path script=${bindir} \ + --install_path bin=${bindir} \ + --install_path bindoc=${mandir}/man1 \ + --install_path libdoc=${mandir}/man3 \ + ${EXTRA_CPAN_BUILD_FLAGS} +} + +cpan_build_do_compile () { + perl Build +} + +cpan_build_do_install () { + perl Build install +} + +EXPORT_FUNCTIONS do_configure do_compile do_install diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass new file mode 100644 index 0000000000..a8565e91e3 --- /dev/null +++ b/meta/classes/cross-canadian.bbclass @@ -0,0 +1,142 @@ +# +# NOTE - When using this class the user is repsonsible for ensuring that +# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH +# is changed, another nativesdk xxx-canadian-cross can be installed +# + + +# SDK packages are built either explicitly by the user, +# or indirectly via dependency. No need to be in 'world'. +EXCLUDE_FROM_WORLD = "1" +CLASSOVERRIDE = "class-cross-canadian" +STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" + +# +# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS +# +PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}" +CANADIANEXTRAOS = "" +MODIFYTOS ??= "1" +python () { + archs = d.getVar('PACKAGE_ARCHS', True).split() + sdkarchs = [] + for arch in archs: + sdkarchs.append(arch + '-${SDKPKGSUFFIX}') + d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs)) + + # Allow the following code segment to be disabled, e.g. 
meta-environment + if d.getVar("MODIFYTOS", True) != "1": + return + # PowerPC can build "linux" and "linux-gnuspe" + tarch = d.getVar("TARGET_ARCH", True) + if tarch == "powerpc": + tos = d.getVar("TARGET_OS", True) + if (tos != "linux" and tos != "linux-gnuspe" + and tos != "linux-uclibc" and tos != "linux-uclibcspe" + and tos != "linux-musl" and tos != "linux-muslspe"): + bb.fatal("Building cross-candian powerpc for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS", True)) + # This is a bit ugly. We need to zero LIBC/ABI extension which will change TARGET_OS + # however we need the old value in some variables. We expand those here first. + d.setVar("DEPENDS", d.getVar("DEPENDS", True)) + d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN", True)) + for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]: + n = prefix + "_FOR_TARGET" + d.setVar(n, d.getVar(n, True)) + + d.setVar("LIBCEXTENSION", "") + d.setVar("ABIEXTENSION", "") + d.setVar("CANADIANEXTRAOS", "linux-gnuspe") +} +MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}" + +INHIBIT_DEFAULT_DEPS = "1" + +STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}" + +TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}" + +PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}" +PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/" + +HOST_ARCH = "${SDK_ARCH}" +HOST_VENDOR = "${SDK_VENDOR}" +HOST_OS = "${SDK_OS}" +HOST_PREFIX = "${SDK_PREFIX}" +HOST_CC_ARCH = "${SDK_CC_ARCH}" +HOST_LD_ARCH = "${SDK_LD_ARCH}" +HOST_AS_ARCH = "${SDK_AS_ARCH}" + +#assign DPKG_ARCH +DPKG_ARCH = "${SDK_ARCH}" + +CPPFLAGS = "${BUILDSDK_CPPFLAGS}" +CFLAGS = "${BUILDSDK_CFLAGS}" +CXXFLAGS = "${BUILDSDK_CFLAGS}" +LDFLAGS = "${BUILDSDK_LDFLAGS} \ + -Wl,-rpath-link,${STAGING_LIBDIR}/.. \ + -Wl,-rpath,${libdir}/.. " + +DEPENDS_GETTEXT = "gettext-native nativesdk-gettext" + +# +# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit +# binaries +# +DEPENDS_append = " chrpath-replacement-native" +EXTRANATIVEPATH += "chrpath-native" + +# Path mangling needed by the cross packaging +# Note that we use := here to ensure that libdir and includedir are +# target paths. 
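# A rough illustration of the ':=' behaviour relied on here, assuming the stock
# bitbake.conf defaults: target_includedir := "${includedir}" snapshots the
# target value (typically "/usr/include") at parse time, before prefix and
# exec_prefix are re-pointed at ${SDKPATHNATIVE}-based locations just below;
# a lazy '=' assignment would instead re-expand to the relocated SDK path.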
+target_base_prefix := "${base_prefix}" +target_prefix := "${prefix}" +target_exec_prefix := "${exec_prefix}" +target_base_libdir = "${target_base_prefix}/${baselib}" +target_libdir = "${target_exec_prefix}/${baselib}" +target_includedir := "${includedir}" + +# Change to place files in SDKPATH +base_prefix = "${SDKPATHNATIVE}" +prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" +exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" +bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" +sbindir = "${bindir}" +base_bindir = "${bindir}" +base_sbindir = "${bindir}" +libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" +libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" + +FILES_${PN} = "${prefix}" +FILES_${PN}-dbg += "${prefix}/.debug \ + ${prefix}/bin/.debug \ + " + +export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig" +export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}" + +do_populate_sysroot[stamp-extra-info] = "" +do_packagedata[stamp-extra-info] = "" + +USE_NLS = "${SDKUSE_NLS}" + +# We have to us TARGET_ARCH but we care about the absolute value +# and not any particular tune that is enabled. +TARGET_ARCH[vardepsexclude] = "TUNE_ARCH" + +# If MLPREFIX is set by multilib code, shlibs +# points to the wrong place so force it +SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2" +SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2" + +cross_canadian_bindirlinks () { + for i in ${CANADIANEXTRAOS} + do + d=${D}${bindir}/../${TARGET_ARCH}${TARGET_VENDOR}-$i + install -d $d + for j in `ls ${D}${bindir}` + do + p=${TARGET_ARCH}${TARGET_VENDOR}-$i-`echo $j | sed -e s,${TARGET_PREFIX},,` + ln -s ../${TARGET_SYS}/$j $d/$p + done + done +} diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass new file mode 100644 index 0000000000..28fd2116c0 --- /dev/null +++ b/meta/classes/cross.bbclass @@ -0,0 +1,75 @@ +inherit relocatable + +# Cross packages are built indirectly via dependency, +# no need for them to be a direct target of 'world' +EXCLUDE_FROM_WORLD = "1" + +CLASSOVERRIDE = "class-cross" +PACKAGES = "" +PACKAGES_DYNAMIC = "" +PACKAGES_DYNAMIC_class-native = "" + +HOST_ARCH = "${BUILD_ARCH}" +HOST_VENDOR = "${BUILD_VENDOR}" +HOST_OS = "${BUILD_OS}" +HOST_PREFIX = "${BUILD_PREFIX}" +HOST_CC_ARCH = "${BUILD_CC_ARCH}" +HOST_LD_ARCH = "${BUILD_LD_ARCH}" +HOST_AS_ARCH = "${BUILD_AS_ARCH}" + +STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}" + +PACKAGE_ARCH = "${BUILD_ARCH}" + +export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig" +export PKG_CONFIG_SYSROOT_DIR = "" + +CPPFLAGS = "${BUILD_CPPFLAGS}" +CFLAGS = "${BUILD_CFLAGS}" +CXXFLAGS = "${BUILD_CFLAGS}" +LDFLAGS = "${BUILD_LDFLAGS}" +LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}" + +TOOLCHAIN_OPTIONS = "" + +DEPENDS_GETTEXT = "gettext-native" + +# Path mangling needed by the cross packaging +# Note that we use := here to ensure that libdir and includedir are +# target paths. 
+target_base_prefix := "${base_prefix}" +target_prefix := "${prefix}" +target_exec_prefix := "${exec_prefix}" +target_base_libdir = "${target_base_prefix}/${baselib}" +target_libdir = "${target_exec_prefix}/${baselib}" +target_includedir := "${includedir}" + +# Overrides for paths +CROSS_TARGET_SYS_DIR = "${TARGET_SYS}" +prefix = "${STAGING_DIR_NATIVE}${prefix_native}" +base_prefix = "${STAGING_DIR_NATIVE}" +exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}" +bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}" +sbindir = "${bindir}" +base_bindir = "${bindir}" +base_sbindir = "${bindir}" +libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}" +libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}" + +do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/" +do_populate_sysroot[stamp-extra-info] = "" +do_packagedata[stamp-extra-info] = "" + +do_install () { + oe_runmake 'DESTDIR=${D}' install +} + +USE_NLS = "no" + +deltask package +deltask packagedata +deltask package_write_ipk +deltask package_write_deb +deltask package_write_rpm +deltask package_write + diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass new file mode 100644 index 0000000000..87d5cf5d37 --- /dev/null +++ b/meta/classes/crosssdk.bbclass @@ -0,0 +1,36 @@ +inherit cross + +CLASSOVERRIDE = "class-crosssdk" +MACHINEOVERRIDES = "" +PACKAGE_ARCH = "${SDK_ARCH}" +python () { + # set TUNE_PKGARCH to SDK_ARCH + d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True)) +} + +STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}" +STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}" + +TARGET_ARCH = "${SDK_ARCH}" +TARGET_VENDOR = "${SDK_VENDOR}" +TARGET_OS = "${SDK_OS}" +TARGET_PREFIX = "${SDK_PREFIX}" +TARGET_CC_ARCH = "${SDK_CC_ARCH}" +TARGET_LD_ARCH = "${SDK_LD_ARCH}" +TARGET_AS_ARCH = "${SDK_AS_ARCH}" +TARGET_FPU = "" + +target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}" +target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}" +target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}" +target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" +target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" +baselib = "lib" + +do_populate_sysroot[stamp-extra-info] = "" +do_packagedata[stamp-extra-info] = "" + +# Need to force this to ensure consitency accross architectures +EXTRA_OECONF_GCC_FLOAT = "" + +USE_NLS = "no" diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass new file mode 100644 index 0000000000..c859703669 --- /dev/null +++ b/meta/classes/debian.bbclass @@ -0,0 +1,141 @@ +# Debian package renaming only occurs when a package is built +# We therefore have to make sure we build all runtime packages +# before building the current package to make the packages runtime +# depends are correct +# +# Custom library package names can be defined setting +# DEBIANNAME_ + pkgname to the desired name. 
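# For instance, a recipe could pin the Debian-style name of one of its packages
# like this (the package name "libexample2" is hypothetical):
#   DEBIANNAME_${PN} = "libexample2"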
+# +# Better expressed as ensure all RDEPENDS package before we package +# This means we can't have circular RDEPENDS/RRECOMMENDS + +AUTO_LIBNAME_PKGS = "${PACKAGES}" + +inherit package + +DEBIANRDEP = "do_packagedata" +do_package_write_ipk[rdeptask] = "${DEBIANRDEP}" +do_package_write_deb[rdeptask] = "${DEBIANRDEP}" +do_package_write_tar[rdeptask] = "${DEBIANRDEP}" +do_package_write_rpm[rdeptask] = "${DEBIANRDEP}" + +python () { + if not d.getVar("PACKAGES", True): + d.setVar("DEBIANRDEP", "") +} + +python debian_package_name_hook () { + import glob, copy, stat, errno, re + + pkgdest = d.getVar('PKGDEST', True) + packages = d.getVar('PACKAGES', True) + bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$") + lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$") + so_re = re.compile("lib.*\.so") + + def socrunch(s): + s = s.lower().replace('_', '-') + m = re.match("^(.*)(.)\.so\.(.*)$", s) + if m is None: + return None + if m.group(2) in '0123456789': + bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3)) + else: + bin = m.group(1) + m.group(2) + m.group(3) + dev = m.group(1) + m.group(2) + return (bin, dev) + + def isexec(path): + try: + s = os.stat(path) + except (os.error, AttributeError): + return 0 + return (s[stat.ST_MODE] & stat.S_IEXEC) + + def add_rprovides(pkg, d): + newpkg = d.getVar('PKG_' + pkg) + if newpkg and newpkg != pkg: + provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split() + if pkg not in provs: + d.appendVar('RPROVIDES_' + pkg, " " + pkg) + + def auto_libname(packages, orig_pkg): + sonames = [] + has_bins = 0 + has_libs = 0 + for file in pkgfiles[orig_pkg]: + root = os.path.dirname(file) + if bin_re.match(root): + has_bins = 1 + if lib_re.match(root): + has_libs = 1 + if so_re.match(os.path.basename(file)): + cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null" + fd = os.popen(cmd) + lines = fd.readlines() + fd.close() + for l in lines: + m = re.match("\s+SONAME\s+([^\s]*)", l) + if m and not m.group(1) in sonames: + sonames.append(m.group(1)) + + bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames)) + soname = None + if len(sonames) == 1: + soname = sonames[0] + elif len(sonames) > 1: + lead = d.getVar('LEAD_SONAME', True) + if lead: + r = re.compile(lead) + filtered = [] + for s in sonames: + if r.match(s): + filtered.append(s) + if len(filtered) == 1: + soname = filtered[0] + elif len(filtered) > 1: + bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead)) + else: + bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead)) + else: + bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames)) + + if has_libs and not has_bins and soname: + soname_result = socrunch(soname) + if soname_result: + (pkgname, devname) = soname_result + for pkg in packages.split(): + if (d.getVar('PKG_' + pkg) or d.getVar('DEBIAN_NOAUTONAME_' + pkg)): + add_rprovides(pkg, d) + continue + debian_pn = d.getVar('DEBIANNAME_' + pkg) + if debian_pn: + newpkg = debian_pn + elif pkg == orig_pkg: + newpkg = pkgname + else: + newpkg = pkg.replace(orig_pkg, devname, 1) + mlpre=d.getVar('MLPREFIX', True) + if mlpre: + if not newpkg.find(mlpre) == 0: + newpkg = mlpre + newpkg + if newpkg != pkg: + d.setVar('PKG_' + pkg, newpkg) + add_rprovides(pkg, d) + else: + add_rprovides(orig_pkg, d) + + # reversed sort is needed when some package is substring of 
another + # ie in ncurses we get without reverse sort: + # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5 + # and later + # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw + # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5 + for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True): + auto_libname(packages, pkg) +} + +EXPORT_FUNCTIONS package_name_hook + +DEBIAN_NAMES = "1" + diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass new file mode 100644 index 0000000000..78f5e4a7ba --- /dev/null +++ b/meta/classes/deploy.bbclass @@ -0,0 +1,10 @@ +DEPLOYDIR = "${WORKDIR}/deploy-${PN}" +SSTATETASKS += "do_deploy" +do_deploy[sstate-inputdirs] = "${DEPLOYDIR}" +do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" + +python do_deploy_setscene () { + sstate_setscene(d) +} +addtask do_deploy_setscene +do_deploy[dirs] = "${DEPLOYDIR} ${B}" diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass new file mode 100644 index 0000000000..41164a3f33 --- /dev/null +++ b/meta/classes/devshell.bbclass @@ -0,0 +1,154 @@ +inherit terminal + +DEVSHELL = "${SHELL}" + +python do_devshell () { + if d.getVarFlag("do_devshell", "manualfakeroot"): + d.prependVar("DEVSHELL", "pseudo ") + fakeenv = d.getVar("FAKEROOTENV", True).split() + for f in fakeenv: + k = f.split("=") + d.setVar(k[0], k[1]) + d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0]) + d.delVarFlag("do_devshell", "fakeroot") + + oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d) +} + +addtask devshell after do_patch + +do_devshell[dirs] = "${S}" +do_devshell[nostamp] = "1" + +# devshell and fakeroot/pseudo need careful handling since only the final +# command should run under fakeroot emulation, any X connection should +# be done as the normal user. We therfore carefully construct the envionment +# manually +python () { + if d.getVarFlag("do_devshell", "fakeroot"): + # We need to signal our code that we want fakeroot however we + # can't manipulate the environment and variables here yet (see YOCTO #4795) + d.setVarFlag("do_devshell", "manualfakeroot", "1") + d.delVarFlag("do_devshell", "fakeroot") +} + +def devpyshell(d): + + import code + import select + import signal + import termios + + m, s = os.openpty() + sname = os.ttyname(s) + + def noechoicanon(fd): + old = termios.tcgetattr(fd) + old[3] = old[3] &~ termios.ECHO &~ termios.ICANON + # &~ termios.ISIG + termios.tcsetattr(fd, termios.TCSADRAIN, old) + + # No echo or buffering over the pty + noechoicanon(s) + + pid = os.fork() + if pid: + os.close(m) + oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d) + os._exit(0) + else: + os.close(s) + + os.dup2(m, sys.stdin.fileno()) + os.dup2(m, sys.stdout.fileno()) + os.dup2(m, sys.stderr.fileno()) + + sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) + sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0) + + bb.utils.nonblockingfd(sys.stdout) + bb.utils.nonblockingfd(sys.stderr) + bb.utils.nonblockingfd(sys.stdin) + + _context = { + "os": os, + "bb": bb, + "time": time, + "d": d, + } + + ps1 = "pydevshell> " + ps2 = "... 
" + buf = [] + more = False + + i = code.InteractiveInterpreter(locals=_context) + print("OE PyShell (PN = %s)\n" % d.getVar("PN", True)) + + def prompt(more): + if more: + prompt = ps2 + else: + prompt = ps1 + sys.stdout.write(prompt) + + # Restore Ctrl+C since bitbake masks this + def signal_handler(signal, frame): + raise KeyboardInterrupt + signal.signal(signal.SIGINT, signal_handler) + + child = None + + prompt(more) + while True: + try: + try: + (r, _, _) = select.select([sys.stdin], [], [], 1) + if not r: + continue + line = sys.stdin.readline().strip() + if not line: + prompt(more) + continue + except EOFError as e: + sys.stdout.write("\n") + except (OSError, IOError) as e: + if e.errno == 11: + continue + if e.errno == 5: + return + raise + else: + if not child: + child = int(line) + continue + buf.append(line) + source = "\n".join(buf) + more = i.runsource(source, "") + if not more: + buf = [] + prompt(more) + except KeyboardInterrupt: + i.write("\nKeyboardInterrupt\n") + buf = [] + more = False + prompt(more) + except SystemExit: + # Easiest way to ensure everything exits + os.kill(child, signal.SIGTERM) + break + +python do_devpyshell() { + import signal + + try: + devpyshell(d) + except SystemExit: + # Stop the SIGTERM above causing an error exit code + return + finally: + return +} +addtask devpyshell after do_patch + +do_devpyshell[nostamp] = "1" diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass new file mode 100644 index 0000000000..1f1d6fba37 --- /dev/null +++ b/meta/classes/distro_features_check.bbclass @@ -0,0 +1,28 @@ +# Allow checking of required and conflicting DISTRO_FEATURES +# +# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included +# in DISTRO_FEATURES. +# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in +# DISTRO_FEATURES. +# +# Copyright 2013 (C) O.S. Systems Software LTDA. 
+ +python () { + required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True) + if required_distro_features: + required_distro_features = required_distro_features.split() + distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split() + for f in required_distro_features: + if f in distro_features: + continue + else: + raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f) + + conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True) + if conflict_distro_features: + conflict_distro_features = conflict_distro_features.split() + distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split() + for f in conflict_distro_features: + if f in distro_features: + raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f) +} diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass new file mode 100644 index 0000000000..a890de7911 --- /dev/null +++ b/meta/classes/distrodata.bbclass @@ -0,0 +1,902 @@ +include conf/distro/include/package_regex.inc +addhandler distro_eventhandler +distro_eventhandler[eventmask] = "bb.event.BuildStarted" +python distro_eventhandler() { + import oe.distro_check as dc + logfile = dc.create_log_file(e.data, "distrodata.csv") + lf = bb.utils.lockfile("%s.lock" % logfile) + f = open(logfile, "a") + f.write("Package,Description,Owner,License,VerMatch,Version,Upsteam,Reason,Recipe Status,Distro 1,Distro 2,Distro 3\n") + f.close() + bb.utils.unlockfile(lf) + + return +} + +addtask distrodata_np +do_distrodata_np[nostamp] = "1" +python do_distrodata_np() { + localdata = bb.data.createCopy(d) + pn = d.getVar("PN", True) + bb.note("Package Name: %s" % pn) + + import oe.distro_check as dist_check + tmpdir = d.getVar('TMPDIR', True) + distro_check_dir = os.path.join(tmpdir, "distro_check") + datetime = localdata.getVar('DATETIME', True) + dist_check.update_distro_data(distro_check_dir, datetime) + + if pn.find("-native") != -1: + pnstripped = pn.split("-native") + bb.note("Native Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.find("-cross") != -1: + pnstripped = pn.split("-cross") + bb.note("cross Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.find("-crosssdk") != -1: + pnstripped = pn.split("-crosssdk") + bb.note("cross Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.startswith("nativesdk-"): + pnstripped = pn.replace("nativesdk-", "") + bb.note("NativeSDK Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + + if pn.find("-initial") != -1: + pnstripped = pn.split("-initial") + bb.note("initial Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + """generate package information from .bb file""" + pname = localdata.getVar('PN', True) + pcurver = localdata.getVar('PV', True) + pdesc = localdata.getVar('DESCRIPTION', True) + if pdesc is not None: + pdesc = pdesc.replace(',','') + pdesc = pdesc.replace('\n','') + + pgrp = localdata.getVar('SECTION', True) + plicense = localdata.getVar('LICENSE', True).replace(',','_') + + rstatus = 
localdata.getVar('RECIPE_COLOR', True) + if rstatus is not None: + rstatus = rstatus.replace(',','') + + pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True) + if pcurver == pupver: + vermatch="1" + else: + vermatch="0" + noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True) + if noupdate_reason is None: + noupdate="0" + else: + noupdate="1" + noupdate_reason = noupdate_reason.replace(',','') + + maintainer = localdata.getVar('RECIPE_MAINTAINER', True) + rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True) + result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata) + + bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \ + (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus)) + line = pn + for i in result: + line = line + "," + i + bb.note("%s\n" % line) +} + +addtask distrodata +do_distrodata[nostamp] = "1" +python do_distrodata() { + logpath = d.getVar('LOG_DIR', True) + bb.utils.mkdirhier(logpath) + logfile = os.path.join(logpath, "distrodata.csv") + + import oe.distro_check as dist_check + localdata = bb.data.createCopy(d) + tmpdir = d.getVar('TMPDIR', True) + distro_check_dir = os.path.join(tmpdir, "distro_check") + datetime = localdata.getVar('DATETIME', True) + dist_check.update_distro_data(distro_check_dir, datetime) + + pn = d.getVar("PN", True) + bb.note("Package Name: %s" % pn) + + if pn.find("-native") != -1: + pnstripped = pn.split("-native") + bb.note("Native Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.startswith("nativesdk-"): + pnstripped = pn.replace("nativesdk-", "") + bb.note("NativeSDK Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.find("-cross") != -1: + pnstripped = pn.split("-cross") + bb.note("cross Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.find("-crosssdk") != -1: + pnstripped = pn.split("-crosssdk") + bb.note("cross Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pn.find("-initial") != -1: + pnstripped = pn.split("-initial") + bb.note("initial Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + """generate package information from .bb file""" + pname = localdata.getVar('PN', True) + pcurver = localdata.getVar('PV', True) + pdesc = localdata.getVar('DESCRIPTION', True) + if pdesc is not None: + pdesc = pdesc.replace(',','') + pdesc = pdesc.replace('\n','') + + pgrp = localdata.getVar('SECTION', True) + plicense = localdata.getVar('LICENSE', True).replace(',','_') + + rstatus = localdata.getVar('RECIPE_COLOR', True) + if rstatus is not None: + rstatus = rstatus.replace(',','') + + pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True) + if pcurver == pupver: + vermatch="1" + else: + vermatch="0" + + noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True) + if noupdate_reason is None: + noupdate="0" + else: + noupdate="1" + noupdate_reason = noupdate_reason.replace(',','') + + maintainer = localdata.getVar('RECIPE_MAINTAINER', True) + rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True) + # do the comparison + result = 
dist_check.compare_in_distro_packages_list(distro_check_dir, localdata) + + lf = bb.utils.lockfile("%s.lock" % logfile) + f = open(logfile, "a") + f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s" % \ + (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus)) + line = "" + for i in result: + line = line + "," + i + f.write(line + "\n") + f.close() + bb.utils.unlockfile(lf) +} + +addtask distrodataall after do_distrodata +do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata" +do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}" +do_distrodataall[nostamp] = "1" +do_distrodataall() { + : +} + +addhandler checkpkg_eventhandler +checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted" +python checkpkg_eventhandler() { + def parse_csv_file(filename): + package_dict = {} + fd = open(filename, "r") + lines = fd.read().rsplit("\n") + fd.close() + + first_line = '' + index = 0 + for line in lines: + #Skip the first line + if index == 0: + first_line = line + index += 1 + continue + elif line == '': + continue + index += 1 + package_name = line.rsplit("\t")[0] + if '-native' in package_name or 'nativesdk-' in package_name: + original_name = package_name.rsplit('-native')[0] + if original_name == '': + original_name = package_name.rsplit('nativesdk-')[0] + if original_name in package_dict: + continue + else: + package_dict[package_name] = line + else: + new_name = package_name + "-native" + if not(new_name in package_dict): + new_name = 'nativesdk-' + package_name + if new_name in package_dict: + del package_dict[new_name] + package_dict[package_name] = line + + fd = open(filename, "w") + fd.write("%s\n"%first_line) + for el in package_dict: + fd.write(package_dict[el] + "\n") + fd.close() + + del package_dict + + if bb.event.getName(e) == "BuildStarted": + import oe.distro_check as dc + logfile = dc.create_log_file(e.data, "checkpkg.csv") + + lf = bb.utils.lockfile("%s.lock" % logfile) + f = open(logfile, "a") + f.write("Package\tVersion\tUpver\tLicense\tSection\tHome\tRelease\tDepends\tBugTracker\tPE\tDescription\tStatus\tTracking\tURI\tMAINTAINER\tNoUpReason\n") + f.close() + bb.utils.unlockfile(lf) + elif bb.event.getName(e) == "BuildCompleted": + import os + filename = "tmp/log/checkpkg.csv" + if os.path.isfile(filename): + lf = bb.utils.lockfile("%s.lock"%filename) + parse_csv_file(filename) + bb.utils.unlockfile(lf) + return +} + +addtask checkpkg +do_checkpkg[nostamp] = "1" +python do_checkpkg() { + localdata = bb.data.createCopy(d) + import re + import tempfile + import subprocess + + """ + sanity check to ensure same name and type. Match as many patterns as possible + such as: + gnome-common-2.20.0.tar.gz (most common format) + gtk+-2.90.1.tar.gz + xf86-input-synaptics-12.6.9.tar.gz + dri2proto-2.3.tar.gz + blktool_4.orig.tar.gz + libid3tag-0.15.1b.tar.gz + unzip552.tar.gz + icu4c-3_6-src.tgz + genext2fs_1.3.orig.tar.gz + gst-fluendo-mp3 + """ + prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*\+?[\-_]" # match most patterns which uses "-" as separator to version digits + prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz + prefix3 = "[0-9]+[\-]?[a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz + prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3) + ver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"#"((\d+[\.\-_[a-z]])+)" + # src.rpm extension was added only for rpm package. 
Can be removed if the rpm + # packaged will always be considered as having to be manually upgraded + suffix = "(tar\.gz|tgz|tar\.bz2|tar\.lz4|zip|xz|rpm|bz2|lz4|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)" + + suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "tar.lz4", "bz2", "lz4", "orig.tar.gz", "src.tar.gz", "src.rpm", "src.tgz", "svnr\d+.tar.bz2", "stable.tar.gz", "src.rpm") + sinterstr = "(?P%s?)v?(?P%s)(\-source)?" % (prefix, ver_regex) + sdirstr = "(?P%s)\.?v?(?P%s)(\-source)?[\.\-](?P%s$)" % (prefix, ver_regex, suffix) + + def parse_inter(s): + m = re.search(sinterstr, s) + if not m: + return None + else: + return (m.group('name'), m.group('ver'), "") + + def parse_dir(s): + m = re.search(sdirstr, s) + if not m: + return None + else: + return (m.group('name'), m.group('ver'), m.group('type')) + + def modelate_version(version): + if version[0] in ['.', '-']: + if version[1].isdigit(): + version = version[1] + version[0] + version[2:len(version)] + else: + version = version[1:len(version)] + + version = re.sub('\-', '.', version) + version = re.sub('_', '.', version) + version = re.sub('(rc)+', '.-1.', version) + version = re.sub('(alpha)+', '.-3.', version) + version = re.sub('(beta)+', '.-2.', version) + if version[0] == 'v': + version = version[1:len(version)] + return version + + """ + Check whether 'new' is newer than 'old' version. We use existing vercmp() for the + purpose. PE is cleared in comparison as it's not for build, and PV is cleared too + for simplicity as it's somehow difficult to get from various upstream format + """ + def __vercmp(old, new): + (on, ov, ot) = old + (en, ev, et) = new + if on != en or (et and et not in suffixtuple): + return False + ov = modelate_version(ov) + ev = modelate_version(ev) + + result = bb.utils.vercmp(("0", ov, ""), ("0", ev, "")) + if result < 0: + return True + else: + return False + + """ + wrapper for fetch upstream directory info + 'url' - upstream link customized by regular expression + 'd' - database + 'tmpf' - tmpfile for fetcher output + We don't want to exit whole build due to one recipe error. So handle all exceptions + gracefully w/o leaking to outer. + """ + def internal_fetch_wget(url, ud, d, tmpf): + status = "ErrFetchUnknown" + + agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12" + fetchcmd = "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"%s\" '%s'" % (tmpf.name, agent, url) + try: + fetcher = bb.fetch2.wget.Wget(d) + fetcher._runwget(ud, d, fetchcmd, True) + status = "SUCC" + except bb.fetch2.BBFetchException, e: + status = "ErrFetch" + + return status + + """ + Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz", + 'url' - upstream link customized by regular expression + 'd' - database + 'curver' - current version + Return new version if success, or else error in "Errxxxx" style + """ + def check_new_dir(url, curver, ud, d): + pn = d.getVar('PN', True) + f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn) + status = internal_fetch_wget(url, ud, d, f) + fhtml = f.read() + if status == "SUCC" and len(fhtml): + newver = parse_inter(curver) + + """ + match "*4.1/">*4.1/ where '*' matches chars + N.B. add package name, only match for digits + """ + regex = d.getVar('REGEX', True) + if regex == '': + regex = "^%s" %prefix + m = re.search("^%s" % regex, curver) + if m: + s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" 
% m.group() + else: + s = "(\d+[\.\-_])+\d+/?" + + searchstr = "[hH][rR][eE][fF]=\"%s\">" % s + + reg = re.compile(searchstr) + valid = 0 + for line in fhtml.split("\n"): + if line.find(curver) >= 0: + valid = 1 + m = reg.search(line) + if m: + ver = m.group().split("\"")[1] + ver = ver.strip("/") + ver = parse_inter(ver) + if ver and __vercmp(newver, ver) == True: + newver = ver + + """Expect a match for curver in directory list, or else it indicates unknown format""" + if not valid: + status = "ErrParseInterDir" + else: + """rejoin the path name""" + status = newver[0] + newver[1] + elif not len(fhtml): + status = "ErrHostNoDir" + + f.close() + if status != "ErrHostNoDir" and re.match("Err", status): + logpath = d.getVar('LOG_DIR', True) + subprocess.call("cp %s %s/" % (f.name, logpath), shell=True) + os.unlink(f.name) + return status + + """ + Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz", + 'url' - upstream link customized by regular expression + 'd' - database + 'curname' - current package name + Return new version if success, or else error in "Errxxxx" style + """ + def check_new_version(url, curname, ud, d): + """possible to have no version in pkg name, such as spectrum-fw""" + if not re.search("\d+", curname): + return pcurver + pn = d.getVar('PN', True) + newver_regex = d.getVar('REGEX', True) + f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn) + status = internal_fetch_wget(url, ud, d, f) + fhtml = f.read() + + if status == "SUCC" and len(fhtml): + newver = parse_dir(curname) + + if not newver_regex: + """this is the default matching pattern, if recipe does not """ + """provide a regex expression """ + """match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """ + pn1 = re.search("^%s" % prefix, curname).group() + s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1 + searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s + reg = searchstr + else: + reg = newver_regex + valid = 0 + count = 0 + for line in fhtml.split("\n"): + if pn == 'kconfig-frontends': + m = re.findall(reg, line) + if m: + valid = 1 + for match in m: + (on, ov, oe) = newver + ver = (on, match[0], oe) + if ver and __vercmp(newver, ver) == True: + newver = ver + continue + count += 1 + m = re.search(reg, line) + if m: + valid = 1 + if not newver_regex: + ver = m.group().split("\"")[1].split("/")[-1] + if ver == "download": + ver = m.group().split("\"")[1].split("/")[-2] + ver = parse_dir(ver) + else: + """ we cheat a little here, but we assume that the + regular expression in the recipe will extract exacly + the version """ + (on, ov, oe) = newver + ver = (on, m.group('pver'), oe) + if ver and __vercmp(newver, ver) == True: + newver = ver + """Expect a match for curver in directory list, or else it indicates unknown format""" + if not valid: + status = "ErrParseDir" + else: + """newver still contains a full package name string""" + status = re.sub('_', '.', newver[1]) + elif not len(fhtml): + status = "ErrHostNoDir" + + f.close() + """if host hasn't directory information, no need to save tmp file""" + if status != "ErrHostNoDir" and re.match("Err", status): + logpath = d.getVar('LOG_DIR', True) + subprocess.call("cp %s %s/" % (f.name, logpath), shell=True) + os.unlink(f.name) + return status + + """first check whether a uri is provided""" + src_uri = d.getVar('SRC_URI', True) + if not src_uri: + return + + """initialize log files.""" + logpath = d.getVar('LOG_DIR', True) + bb.utils.mkdirhier(logpath) + logfile = os.path.join(logpath, "checkpkg.csv") + + """generate package 
information from .bb file""" + pname = d.getVar('PN', True) + + if pname.find("-native") != -1: + if d.getVar('BBCLASSEXTEND', True): + return + pnstripped = pname.split("-native") + bb.note("Native Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pname.startswith("nativesdk-"): + if d.getVar('BBCLASSEXTEND', True): + return + pnstripped = pname.replace("nativesdk-", "") + bb.note("NativeSDK Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pname.find("-cross") != -1: + pnstripped = pname.split("-cross") + bb.note("cross Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + if pname.find("-initial") != -1: + pnstripped = pname.split("-initial") + bb.note("initial Split: %s" % pnstripped) + localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True)) + bb.data.update_data(localdata) + + chk_uri = d.getVar('REGEX_URI', True) + if not chk_uri: + chk_uri = src_uri + pdesc = localdata.getVar('DESCRIPTION', True) + pgrp = localdata.getVar('SECTION', True) + if localdata.getVar('PRSPV', True): + pversion = localdata.getVar('PRSPV', True) + else: + pversion = localdata.getVar('PV', True) + plicense = localdata.getVar('LICENSE', True) + psection = localdata.getVar('SECTION', True) + phome = localdata.getVar('HOMEPAGE', True) + prelease = localdata.getVar('PR', True) + pdepends = localdata.getVar('DEPENDS', True) + pbugtracker = localdata.getVar('BUGTRACKER', True) + ppe = localdata.getVar('PE', True) + psrcuri = localdata.getVar('SRC_URI', True) + maintainer = localdata.getVar('RECIPE_MAINTAINER', True) + + found = 0 + for uri in src_uri.split(): + m = re.compile('(?P[^:]*)').match(uri) + if not m: + raise MalformedUrl(uri) + elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'): + found = 1 + pproto = m.group('type') + break + if not found: + pproto = "file" + pupver = "N/A" + pstatus = "ErrUnknown" + + (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(uri) + if type in ['http', 'https', 'ftp']: + if d.getVar('PRSPV', True): + pcurver = d.getVar('PRSPV', True) + else: + pcurver = d.getVar('PV', True) + else: + if d.getVar('PRSPV', True): + pcurver = d.getVar('PRSPV', True) + else: + pcurver = d.getVar("SRCREV", True) + + + if type in ['http', 'https', 'ftp']: + ud = bb.fetch2.FetchData(uri, d) + newver = pcurver + altpath = path + dirver = "-" + curname = "-" + + """ + match version number amid the path, such as "5.7" in: + http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz + N.B. how about sth. like "../5.7/5.8/..."? Not find such example so far :-P + """ + m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path) + if m: + altpath = path.split(m.group())[0] + dirver = m.group().strip("/") + + """use new path and remove param. 
for wget only param is md5sum""" + alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}]) + my_uri = d.getVar('REGEX_URI', True) + if my_uri: + if d.getVar('PRSPV', True): + newver = d.getVar('PRSPV', True) + else: + newver = d.getVar('PV', True) + else: + newver = check_new_dir(alturi, dirver, ud, d) + altpath = path + if not re.match("Err", newver) and dirver != newver: + altpath = altpath.replace(dirver, newver, True) + # For folder in folder cases - try to enter the folder again and then try parsing + """Now try to acquire all remote files in current directory""" + if not re.match("Err", newver): + curname = altpath.split("/")[-1] + + """get remote name by skipping pacakge name""" + m = re.search(r"/.*/", altpath) + if not m: + altpath = "/" + else: + altpath = m.group() + + chk_uri = d.getVar('REGEX_URI', True) + if not chk_uri: + alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}]) + else: + alturi = chk_uri + newver = check_new_version(alturi, curname, ud, d) + while(newver == "ErrHostNoDir"): + if alturi == "/download": + break + else: + alturi = "/".join(alturi.split("/")[0:-2]) + "/download" + newver = check_new_version(alturi, curname, ud, d) + if not re.match("Err", newver): + pupver = newver + if pupver != pcurver: + pstatus = "UPDATE" + else: + pstatus = "MATCH" + + if re.match("Err", newver): + pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname + elif type == 'git': + if user: + gituser = user + '@' + else: + gituser = "" + + if 'protocol' in parm: + gitproto = parm['protocol'] + else: + gitproto = "git" + + # Get all tags and HEAD + if d.getVar('GIT_REGEX', True): + gitcmd = "git ls-remote %s://%s%s%s %s 2>&1" % (gitproto, gituser, host, path, d.getVar('GIT_REGEX', True)) + else: + gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path) + gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path) + + tmp = os.popen(gitcmd).read() + if 'unable to connect' in tmp: + tmp = None + tmp2 = os.popen(gitcmd2).read() + if 'unable to connect' in tmp2: + tmp2 = None + #This is for those repos have tag like: refs/tags/1.2.2 + phash = pversion.rsplit("+")[-1] + if tmp: + tmpline = tmp.split("\n") + verflag = 0 + pupver = pversion + for line in tmpline: + if len(line)==0: + break; + puptag = line.split("/")[-1] + upstr_regex = d.getVar('REGEX', True) + if upstr_regex: + puptag = re.search(upstr_regex, puptag) + else: + puptag = re.search("(?P([0-9][\.|_]?)+)", puptag) + if puptag == None: + continue + puptag = puptag.group('pver') + puptag = re.sub("_",".",puptag) + plocaltag = pupver.split("+git")[0] + if "git" in plocaltag: + plocaltag = plocaltag.split("-")[0] + result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, "")) + + if result > 0: + verflag = 1 + pupver = puptag + elif verflag == 0 : + pupver = plocaltag + #This is for those no tag repo + elif tmp2: + pupver = pversion.rsplit("+")[0] + phash = pupver + else: + pstatus = "ErrGitAccess" + if not ('ErrGitAccess' in pstatus): + + latest_head = tmp2.rsplit("\t")[0][:7] + tmp3 = re.search('(?P(\d+[\.-]?)+)(?P(\+git[r|\-|]?)AUTOINC\+)(?P([\w|_]+))', pversion) + tmp4 = re.search('(?P(\d+[\.-]?)+)(?P(\+git[r|\-|]?)AUTOINC\+)(?P([\w|_]+))', pupver) + if not tmp4: + tmp4 = re.search('(?P(\d+[\.-]?)+)', pupver) + + if tmp3: + # Get status of the package - MATCH/UPDATE + result = bb.utils.vercmp(("0", tmp3.group('git_ver'), ""), ("0",tmp3.group('git_ver') , "")) + # Get the latest tag + pstatus = 'MATCH' + if result < 0: + latest_pv = 
tmp3.group('git_ver') + else: + latest_pv = pupver + if not(tmp3.group('head_md5')[:7] in latest_head) or not(latest_head in tmp3.group('head_md5')[:7]): + pstatus = 'UPDATE' + + git_prefix = tmp3.group('git_prefix') + pupver = latest_pv + tmp3.group('git_prefix') + latest_head + else: + if not tmp3: + bb.plain("#DEBUG# Package %s: current version (%s) doesn't match the usual pattern" %(pname, pversion)) + elif type == 'svn': + ud = bb.fetch2.FetchData(uri, d) + + svnFetcher = bb.fetch2.svn.Svn(d) + svnFetcher.urldata_init(ud, d) + try: + pupver = svnFetcher.latest_revision(ud, d, ud.names[0]) + except bb.fetch2.FetchError: + pstatus = "ErrSvnAccess" + + if pupver: + if pupver in pversion: + pstatus = "MATCH" + else: + pstatus = "UPDATE" + else: + pstatus = "ErrSvnAccess" + + if 'rev' in ud.parm: + pcurver = ud.parm['rev'] + + if pstatus != "ErrSvnAccess": + tag = pversion.rsplit("+svn")[0] + svn_prefix = re.search('(\+svn[r|\-]?)', pversion) + if tag and svn_prefix: + pupver = tag + svn_prefix.group() + pupver + + elif type == 'cvs': + pupver = "HEAD" + pstatus = "UPDATE" + elif type == 'file': + """local file is always up-to-date""" + pupver = pcurver + pstatus = "MATCH" + else: + pstatus = "ErrUnsupportedProto" + + if re.match("Err", pstatus): + pstatus += ":%s%s" % (host, path) + + """Read from manual distro tracking fields as alternative""" + pmver = d.getVar("RECIPE_UPSTREAM_VERSION", True) + if not pmver: + pmver = "N/A" + pmstatus = "ErrNoRecipeData" + else: + if pmver == pcurver: + pmstatus = "MATCH" + else: + pmstatus = "UPDATE" + + psrcuri = psrcuri.split()[0] + pdepends = "".join(pdepends.split("\t")) + pdesc = "".join(pdesc.split("\t")) + no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True) + lf = bb.utils.lockfile("%s.lock" % logfile) + f = open(logfile, "a") + f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \ + (pname,pversion,pupver,plicense,psection, phome,prelease, pdepends,pbugtracker,ppe,pdesc,pstatus,pmver,psrcuri,maintainer, no_upgr_reason)) + f.close() + bb.utils.unlockfile(lf) +} + +addtask checkpkgall after do_checkpkg +do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg" +do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}" +do_checkpkgall[nostamp] = "1" +do_checkpkgall() { + : +} + +addhandler distro_check_eventhandler +distro_check_eventhandler[eventmask] = "bb.event.BuildStarted" +python distro_check_eventhandler() { + """initialize log files.""" + import oe.distro_check as dc + result_file = dc.create_log_file(e.data, "distrocheck.csv") + return +} + +addtask distro_check +do_distro_check[nostamp] = "1" +python do_distro_check() { + """checks if the package is present in other public Linux distros""" + import oe.distro_check as dc + import shutil + if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d): + return + + localdata = bb.data.createCopy(d) + bb.data.update_data(localdata) + tmpdir = d.getVar('TMPDIR', True) + distro_check_dir = os.path.join(tmpdir, "distro_check") + logpath = d.getVar('LOG_DIR', True) + bb.utils.mkdirhier(logpath) + result_file = os.path.join(logpath, "distrocheck.csv") + datetime = localdata.getVar('DATETIME', True) + dc.update_distro_data(distro_check_dir, datetime) + + # do the comparison + result = dc.compare_in_distro_packages_list(distro_check_dir, d) + + # save the results + dc.save_distro_check_result(result, datetime, result_file, d) +} + 
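As a usage sketch (assuming the class is enabled globally, which is the usual way these tracking tasks are driven rather than inheriting the class in individual recipes), a local.conf fragment such as:

    INHERIT += "distrodata"

lets the tasks above be invoked with, for example, bitbake world -c checkpkgall, bitbake world -c distrodataall, or bitbake <recipe> -c distro_check; the results are written to checkpkg.csv, distrodata.csv and distrocheck.csv under ${LOG_DIR}, as set up by the event handlers and tasks above.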
+addtask distro_checkall after do_distro_check +do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check" +do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}" +do_distro_checkall[nostamp] = "1" +do_distro_checkall() { + : +} +# +#Check Missing License Text. +#Use this task to generate the missing license text data for pkg-report system, +#then we can search those recipes which license text isn't exsit in common-licenses directory +# +addhandler checklicense_eventhandler +checklicense_eventhandler[eventmask] = "bb.event.BuildStarted" +python checklicense_eventhandler() { + """initialize log files.""" + import oe.distro_check as dc + logfile = dc.create_log_file(e.data, "missinglicense.csv") + lf = bb.utils.lockfile("%s.lock" % logfile) + f = open(logfile, "a") + f.write("Package\tLicense\tMissingLicense\n") + f.close() + bb.utils.unlockfile(lf) + return +} + +addtask checklicense +do_checklicense[nostamp] = "1" +python do_checklicense() { + import shutil + logpath = d.getVar('LOG_DIR', True) + bb.utils.mkdirhier(logpath) + pn = d.getVar('PN', True) + logfile = os.path.join(logpath, "missinglicense.csv") + generic_directory = d.getVar('COMMON_LICENSE_DIR', True) + license_types = d.getVar('LICENSE', True) + for license_type in ((license_types.replace('+', '').replace('|', '&') + .replace('(', '').replace(')', '').replace(';', '') + .replace(',', '').replace(" ", "").split("&"))): + if not os.path.isfile(os.path.join(generic_directory, license_type)): + lf = bb.utils.lockfile("%s.lock" % logfile) + f = open(logfile, "a") + f.write("%s\t%s\t%s\n" % \ + (pn,license_types,license_type)) + f.close() + bb.utils.unlockfile(lf) + return +} + +addtask checklicenseall after do_checklicense +do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense" +do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}" +do_checklicenseall[nostamp] = "1" +do_checklicenseall() { + : +} + + diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass new file mode 100644 index 0000000000..aa18e8b292 --- /dev/null +++ b/meta/classes/distutils-base.bbclass @@ -0,0 +1,4 @@ +DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}" +RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}" + +inherit distutils-common-base pythonnative diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass new file mode 100644 index 0000000000..9a608eb63e --- /dev/null +++ b/meta/classes/distutils-common-base.bbclass @@ -0,0 +1,24 @@ +inherit python-dir + +EXTRA_OEMAKE = "" + +export STAGING_INCDIR +export STAGING_LIBDIR + +PACKAGES = "${PN}-staticdev ${PN}-dev ${PN}-dbg ${PN}-doc ${PN}" + +FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*" + +FILES_${PN}-staticdev += "\ + ${PYTHON_SITEPACKAGES_DIR}/*.a \ +" +FILES_${PN}-dev += "\ + ${datadir}/pkgconfig \ + ${libdir}/pkgconfig \ + ${PYTHON_SITEPACKAGES_DIR}/*.la \ +" +FILES_${PN}-dbg += "\ + ${PYTHON_SITEPACKAGES_DIR}/.debug \ + ${PYTHON_SITEPACKAGES_DIR}/*/.debug \ + ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \ +" diff --git a/meta/classes/distutils-native-base.bbclass b/meta/classes/distutils-native-base.bbclass new file mode 100644 index 0000000000..509cb9551a --- /dev/null +++ b/meta/classes/distutils-native-base.bbclass @@ -0,0 +1,3 @@ +inherit distutils-common-base + +DEPENDS += "${@["${PYTHON_PN}-native", ""][(d.getVar('PACKAGES', True) == '')]}" diff --git a/meta/classes/distutils-tools.bbclass 
b/meta/classes/distutils-tools.bbclass new file mode 100644 index 0000000000..f43450e56f --- /dev/null +++ b/meta/classes/distutils-tools.bbclass @@ -0,0 +1,77 @@ +DISTUTILS_BUILD_ARGS ?= "" +DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}" +DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \ + --install-data=${STAGING_DATADIR}" +DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \ + --install-data=${D}/${datadir}" + +distutils_do_compile() { + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py build_ext execution failed." +} + +distutils_stage_headers() { + install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install_headers execution failed." +} + +distutils_stage_all() { + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} + PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install (stage) execution failed." +} + +distutils_do_install() { + echo "Beginning ${PN} Install ..." + install -d ${D}${PYTHON_SITEPACKAGES_DIR} + echo "Step 2 of ${PN} Install ..." + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install execution failed." + + echo "Step 3 of ${PN} Install ..." + # support filenames with *spaces* + find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \ + sed -i -e s:${D}::g $i + done + + echo "Step 4 of ${PN} Install ..." + if test -e ${D}${bindir} ; then + for i in ${D}${bindir}/* ; do \ + sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i + done + fi + + echo "Step 4 of ${PN} Install ..." + if test -e ${D}${sbindir}; then + for i in ${D}${sbindir}/* ; do \ + sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i + done + fi + + echo "Step 5 of ${PN} Install ..." 
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth + + # + # FIXME: Bandaid against wrong datadir computation + # + if test -e ${D}${datadir}/share; then + mv -f ${D}${datadir}/share/* ${D}${datadir}/ + fi +} + +#EXPORT_FUNCTIONS do_compile do_install + +export LDSHARED="${CCLD} -shared" diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass new file mode 100644 index 0000000000..6ed7ecc99f --- /dev/null +++ b/meta/classes/distutils.bbclass @@ -0,0 +1,80 @@ +inherit distutils-base + +DISTUTILS_BUILD_ARGS ?= "" +DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}" +DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \ + --install-data=${STAGING_DATADIR}" +DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \ + --install-data=${D}/${datadir}" + +distutils_do_compile() { + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py build_ext execution failed." +} + +distutils_stage_headers() { + install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install_headers execution failed." +} + +distutils_stage_all() { + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} + PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install (stage) execution failed." +} + +distutils_do_install() { + install -d ${D}${PYTHON_SITEPACKAGES_DIR} + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install execution failed." 
+ + # support filenames with *spaces* + # only modify file if it contains path to avoid recompilation on the target + find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \; + + if test -e ${D}${bindir} ; then + for i in ${D}${bindir}/* ; do \ + if [ ${PN} != "${BPN}-native" ]; then + sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i + fi + sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i + done + fi + + if test -e ${D}${sbindir}; then + for i in ${D}${sbindir}/* ; do \ + if [ ${PN} != "${BPN}-native" ]; then + sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i + fi + sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i + done + fi + + rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth + rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py* + + # + # FIXME: Bandaid against wrong datadir computation + # + if test -e ${D}${datadir}/share; then + mv -f ${D}${datadir}/share/* ${D}${datadir}/ + rmdir ${D}${datadir}/share + fi +} + +EXPORT_FUNCTIONS do_compile do_install + +export LDSHARED="${CCLD} -shared" diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass new file mode 100644 index 0000000000..d4d25dccb9 --- /dev/null +++ b/meta/classes/distutils3-base.bbclass @@ -0,0 +1,8 @@ +DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}" +RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}" + +PYTHON_BASEVERSION = "3.3" +PYTHON_ABI = "m" + +inherit distutils-common-base python3native + diff --git a/meta/classes/distutils3-native-base.bbclass b/meta/classes/distutils3-native-base.bbclass new file mode 100644 index 0000000000..ed3fe54587 --- /dev/null +++ b/meta/classes/distutils3-native-base.bbclass @@ -0,0 +1,4 @@ +PYTHON_BASEVERSION = "3.3" +PYTHON_ABI = "m" + +inherit distutils-native-base diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass new file mode 100644 index 0000000000..e909ef41b6 --- /dev/null +++ b/meta/classes/distutils3.bbclass @@ -0,0 +1,96 @@ +inherit distutils3-base + +DISTUTILS_BUILD_ARGS ?= "" +DISTUTILS_BUILD_EXT_ARGS ?= "" +DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}" +DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \ + --install-data=${STAGING_DATADIR}" +DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \ + --install-data=${D}/${datadir}" + +distutils3_do_compile() { + if [ ${BUILD_SYS} != ${HOST_SYS} ]; then + SYS=${MACHINE} + else + SYS=${HOST_SYS} + fi + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \ + build ${DISTUTILS_BUILD_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py build_ext execution failed." +} + +distutils3_stage_headers() { + install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} + if [ ${BUILD_SYS} != ${HOST_SYS} ]; then + SYS=${MACHINE} + else + SYS=${HOST_SYS} + fi + BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install_headers execution failed." 
+} + +distutils3_stage_all() { + if [ ${BUILD_SYS} != ${HOST_SYS} ]; then + SYS=${MACHINE} + else + SYS=${HOST_SYS} + fi + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} + PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install (stage) execution failed." +} + +distutils3_do_install() { + install -d ${D}${PYTHON_SITEPACKAGES_DIR} + if [ ${BUILD_SYS} != ${HOST_SYS} ]; then + SYS=${MACHINE} + else + SYS=${HOST_SYS} + fi + STAGING_INCDIR=${STAGING_INCDIR} \ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \ + BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \ + ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \ + bbfatal "${PYTHON_PN} setup.py install execution failed." + + # support filenames with *spaces* + find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \; + + if test -e ${D}${bindir} ; then + for i in ${D}${bindir}/* ; do \ + sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i + sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i + done + fi + + if test -e ${D}${sbindir}; then + for i in ${D}${sbindir}/* ; do \ + sed -i -e s:${STAGING_BINDIR_NATIVE}/python-${PYTHON_PN}/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i + sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i + done + fi + + rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth + + # + # FIXME: Bandaid against wrong datadir computation + # + if test -e ${D}${datadir}/share; then + mv -f ${D}${datadir}/share/* ${D}${datadir}/ + rmdir ${D}${datadir}/share + fi +} + +EXPORT_FUNCTIONS do_compile do_install + +export LDSHARED="${CCLD} -shared" diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass new file mode 100644 index 0000000000..2ac62747a2 --- /dev/null +++ b/meta/classes/externalsrc.bbclass @@ -0,0 +1,53 @@ +# Copyright (C) 2012 Linux Foundation +# Author: Richard Purdie +# Some code and influence taken from srctree.bbclass: +# Copyright (C) 2009 Chris Larson +# Released under the MIT license (see COPYING.MIT for the terms) +# +# externalsrc.bbclass enables use of an existing source tree, usually external to +# the build system to build a piece of software rather than the usual fetch/unpack/patch +# process. +# +# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the +# directory you want to use containing the sources e.g. from local.conf for a recipe +# called "myrecipe" you would do: +# +# INHERIT += "externalsrc" +# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree" +# +# In order to make this class work for both target and native versions (or with +# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate +# directory under the work directory (split source and build directories). 
This is +# the default, but the build directory can be set to the source directory if +# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.: +# +# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree" +# + +SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch" + +python () { + externalsrc = d.getVar('EXTERNALSRC', True) + if externalsrc: + d.setVar('S', externalsrc) + externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True) + if externalsrcbuild: + d.setVar('B', externalsrcbuild) + else: + d.setVar('B', '${WORKDIR}/${BPN}-${PV}/') + d.setVar('SRC_URI', '') + + tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys()) + + for task in tasks: + if task.endswith("_setscene"): + # sstate is never going to work for external source trees, disable it + bb.build.deltask(task, d) + else: + # Since configure will likely touch ${S}, ensure only we lock so one task has access at a time + d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock") + + for task in d.getVar("SRCTREECOVEREDTASKS", True).split(): + bb.build.deltask(task, d) +} + diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass new file mode 100644 index 0000000000..faf57b108e --- /dev/null +++ b/meta/classes/extrausers.bbclass @@ -0,0 +1,65 @@ +# This bbclass is mainly used for image level user/group configuration. +# Inherit this class if you want to make EXTRA_USERS_PARAMS effective. + +# Below is an example showing how to use this functionality. +# INHERIT += "extrausers" +# EXTRA_USERS_PARAMS = "\ +# useradd -p '' tester; \ +# groupadd developers; \ +# userdel nobody; \ +# groupdel -g video; \ +# groupmod -g 1020 developers; \ +# usermod -s /bin/sh tester; \ +# " + + +inherit useradd_base + +IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}" + +# Image level user / group settings +ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;" + +# Image level user / group settings +set_user_group () { + user_group_settings="${EXTRA_USERS_PARAMS}" + export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo" + setting=`echo $user_group_settings | cut -d ';' -f1` + remaining=`echo $user_group_settings | cut -d ';' -f2-` + while test "x$setting" != "x"; do + cmd=`echo $setting | cut -d ' ' -f1` + opts=`echo $setting | cut -d ' ' -f2-` + # Different from useradd.bbclass, there's no file locking issue here, as + # this setting is actually a serial process. So we only retry once. 
+ case $cmd in + useradd) + perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1 + ;; + groupadd) + perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1 + ;; + userdel) + perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1 + ;; + groupdel) + perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1 + ;; + usermod) + perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1 + ;; + groupmod) + perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1 + ;; + *) + bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd" + ;; + esac + # Avoid infinite loop if the last parameter doesn't end with ';' + if [ "$setting" = "$remaining" ]; then + break + fi + # iterate to the next setting + setting=`echo $remaining | cut -d ';' -f1` + remaining=`echo $remaining | cut -d ';' -f2-` + done +} diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass new file mode 100644 index 0000000000..d122387ffd --- /dev/null +++ b/meta/classes/fontcache.bbclass @@ -0,0 +1,45 @@ +# +# This class will generate the proper postinst/postrm scriptlets for font +# packages. +# + +DEPENDS += "qemu-native" +inherit qemu + +FONT_PACKAGES ??= "${PN}" +FONT_EXTRA_RDEPENDS ?= "fontconfig-utils" +FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig" +fontcache_common() { +if [ "x$D" != "x" ] ; then + $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \ + libdir=${libdir} base_libdir=${base_libdir} fontconfigcachedir=${FONTCONFIG_CACHE_DIR} +else + fc-cache +fi +} + +python () { + font_pkgs = d.getVar('FONT_PACKAGES', True).split() + deps = d.getVar("FONT_EXTRA_RDEPENDS", True) + + for pkg in font_pkgs: + if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps) +} + +python add_fontcache_postinsts() { + for pkg in d.getVar('FONT_PACKAGES', True).split(): + bb.note("adding fonts postinst and postrm scripts to %s" % pkg) + postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('fontcache_common', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += d.getVar('fontcache_common', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) +} + +PACKAGEFUNCS =+ "add_fontcache_postinsts" diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass new file mode 100644 index 0000000000..e9076b2779 --- /dev/null +++ b/meta/classes/gconf.bbclass @@ -0,0 +1,70 @@ +DEPENDS += "gconf gconf-native" + +# These are for when gconftool is used natively and the prefix isn't necessarily +# the sysroot. TODO: replicate the postinst logic for -native packages going +# into sysroot as they won't be running their own install-time schema +# registration (disabled below) nor the postinst script (as they don't happen). +export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults" +export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2" + +# Disable install-time schema registration as we're a packaging system so this +# happens in the postinst script, not at install time. Set both the configure +# script option and the traditional envionment variable just to make sure. 
+EXTRA_OECONF += "--disable-schemas-install" +export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1" + +gconf_postinst() { +if [ "x$D" != "x" ]; then + export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults" +else + export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` +fi + +SCHEMA_LOCATION=$D/etc/gconf/schemas +for SCHEMA in ${SCHEMA_FILES}; do + if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then + HOME=$D/root gconftool-2 \ + --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null + fi +done +} + +gconf_prerm() { +SCHEMA_LOCATION=/etc/gconf/schemas +for SCHEMA in ${SCHEMA_FILES}; do + if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then + HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \ + gconftool-2 \ + --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null + fi +done +} + +python populate_packages_append () { + import re + packages = d.getVar('PACKAGES', True).split() + pkgdest = d.getVar('PKGDEST', True) + + for pkg in packages: + schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg) + schemas = [] + schema_re = re.compile(".*\.schemas$") + if os.path.exists(schema_dir): + for f in os.listdir(schema_dir): + if schema_re.match(f): + schemas.append(f) + if schemas != []: + bb.note("adding gconf postinst and prerm scripts to %s" % pkg) + d.setVar('SCHEMA_FILES', " ".join(schemas)) + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('gconf_postinst', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + prerm = d.getVar('pkg_prerm_%s' % pkg, True) + if not prerm: + prerm = '#!/bin/sh\n' + prerm += d.getVar('gconf_prerm', True) + d.setVar('pkg_prerm_%s' % pkg, prerm) + d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX') + 'gconf') +} diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass new file mode 100644 index 0000000000..03b89b2455 --- /dev/null +++ b/meta/classes/gettext.bbclass @@ -0,0 +1,19 @@ +def gettext_dependencies(d): + if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'): + return "" + if d.getVar('USE_NLS', True) == 'no': + return "gettext-minimal-native" + return d.getVar('DEPENDS_GETTEXT', False) + +def gettext_oeconf(d): + if d.getVar('USE_NLS', True) == 'no': + return '--disable-nls' + # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set + if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'): + return '--disable-nls' + return "--enable-nls" + +DEPENDS_GETTEXT ??= "virtual/gettext gettext-native" + +BASEDEPENDS =+ "${@gettext_dependencies(d)}" +EXTRA_OECONF_append = " ${@gettext_oeconf(d)}" diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass new file mode 100644 index 0000000000..0de22dd6d2 --- /dev/null +++ b/meta/classes/gnome.bbclass @@ -0,0 +1,5 @@ +inherit gnomebase gtk-icon-cache gconf mime + +EXTRA_OECONF += "--disable-introspection" + +UNKNOWN_CONFIGURE_WHITELIST += "--disable-introspection" diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass new file mode 100644 index 0000000000..6ca13cb1e0 --- /dev/null +++ b/meta/classes/gnomebase.bbclass @@ -0,0 +1,30 @@ +def gnome_verdir(v): + return oe.utils.trim_version(v, 2) + +GNOME_COMPRESS_TYPE ?= "bz2" +SECTION ?= "x11/gnome" +GNOMEBN ?= "${BPN}" +SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive" + +DEPENDS += "gnome-common-native" + +FILES_${PN} += "${datadir}/application-registry \ + 
${datadir}/mime-info \ + ${datadir}/mime/packages \ + ${datadir}/mime/application \ + ${datadir}/gnome-2.0 \ + ${datadir}/polkit* \ + ${datadir}/GConf \ + ${datadir}/glib-2.0/schemas \ +" + +FILES_${PN}-doc += "${datadir}/devhelp" + +inherit autotools pkgconfig + +do_install_append() { + rm -rf ${D}${localstatedir}/lib/scrollkeeper/* + rm -rf ${D}${localstatedir}/scrollkeeper/* + rm -f ${D}${datadir}/applications/*.cache +} + diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass new file mode 100644 index 0000000000..47bd35e049 --- /dev/null +++ b/meta/classes/grub-efi.bbclass @@ -0,0 +1,141 @@ +# grub-efi.bbclass +# Copyright (c) 2011, Intel Corporation. +# All rights reserved. +# +# Released under the MIT license (see packages/COPYING) + +# Provide grub-efi specific functions for building bootable images. + +# External variables +# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional) +# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) +# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu +# ${LABELS} - a list of targets for the automatic config +# ${APPEND} - an override list of append strings for each label +# ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional) +# ${GRUB_TIMEOUT} - timeout before executing the deault label (optional) + +do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy" +do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy" + +GRUB_SERIAL ?= "console=ttyS0,115200" +GRUBCFG = "${S}/grub.cfg" +GRUB_TIMEOUT ?= "10" +#FIXME: build this from the machine config +GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1" + +EFIDIR = "/EFI/BOOT" + +efi_populate() { + # DEST must be the root of the image so that EFIDIR is not + # nested under a top level directory. 
+ DEST=$1 + + install -d ${DEST}${EFIDIR} + + GRUB_IMAGE="bootia32.efi" + if [ "${TARGET_ARCH}" = "x86_64" ]; then + GRUB_IMAGE="bootx64.efi" + fi + install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR} + + install -m 0644 ${GRUBCFG} ${DEST}${EFIDIR} +} + +efi_iso_populate() { + iso_dir=$1 + efi_populate $iso_dir + # Build a EFI directory to create efi.img + mkdir -p ${EFIIMGDIR}/${EFIDIR} + cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR} + cp $iso_dir/vmlinuz ${EFIIMGDIR} + echo "${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh + if [ -f "$iso_dir/initrd" ] ; then + cp $iso_dir/initrd ${EFIIMGDIR} + fi +} + +efi_hddimg_populate() { + efi_populate $1 +} + +python build_efi_cfg() { + import sys + + workdir = d.getVar('WORKDIR', True) + if not workdir: + bb.error("WORKDIR not defined, unable to package") + return + + gfxserial = d.getVar('GRUB_GFXSERIAL', True) or "" + + labels = d.getVar('LABELS', True) + if not labels: + bb.debug(1, "LABELS not defined, nothing to do") + return + + if labels == []: + bb.debug(1, "No labels, nothing to do") + return + + cfile = d.getVar('GRUBCFG', True) + if not cfile: + raise bb.build.FuncFailed('Unable to read GRUBCFG') + + try: + cfgfile = file(cfile, 'w') + except OSError: + raise bb.build.funcFailed('Unable to open %s' % (cfile)) + + cfgfile.write('# Automatically created by OE\n') + + opts = d.getVar('GRUB_OPTS', True) + if opts: + for opt in opts.split(';'): + cfgfile.write('%s\n' % opt) + + cfgfile.write('default=%s\n' % (labels.split()[0])) + + timeout = d.getVar('GRUB_TIMEOUT', True) + if timeout: + cfgfile.write('timeout=%s\n' % timeout) + else: + cfgfile.write('timeout=50\n') + + if gfxserial == "1": + btypes = [ [ " graphics console", "" ], + [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ] + else: + btypes = [ [ "", "" ] ] + + for label in labels.split(): + localdata = d.createCopy() + + overrides = localdata.getVar('OVERRIDES', True) + if not overrides: + raise bb.build.FuncFailed('OVERRIDES not defined') + + for btype in btypes: + localdata.setVar('OVERRIDES', label + ':' + overrides) + bb.data.update_data(localdata) + + cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0])) + lb = label + if label == "install": + lb = "install-efi" + cfgfile.write('linux /vmlinuz LABEL=%s' % (lb)) + + append = localdata.getVar('APPEND', True) + initrd = localdata.getVar('INITRD', True) + + if append: + cfgfile.write('%s' % (append)) + cfgfile.write(' %s' % btype[1]) + cfgfile.write('\n') + + if initrd: + cfgfile.write('initrd /initrd') + cfgfile.write('\n}\n') + + cfgfile.close() +} diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass new file mode 100644 index 0000000000..dec5abc026 --- /dev/null +++ b/meta/classes/gsettings.bbclass @@ -0,0 +1,37 @@ +# A bbclass to handle installed GSettings (glib) schemas, updated the compiled +# form on package install and remove. +# +# The compiled schemas are platform-agnostic, so we can depend on +# glib-2.0-native for the native tool and run the postinst script when the +# rootfs builds to save a little time on first boot. 
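As a minimal sketch of how the class is consumed (the recipe name is hypothetical), a package that ships schemas under ${datadir}/glib-2.0/schemas only needs:

    # example-settings recipe (hypothetical)
    inherit gsettings

and the postinst/postrm scriptlets appended below then run glib-compile-schemas on that directory for the package.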
+ +# TODO use a trigger so that this runs once per package operation run + +DEPENDS += "glib-2.0-native" + +RDEPENDS_${PN} += "glib-2.0-utils" + +FILES_${PN} += "${datadir}/glib-2.0/schemas" + +gsettings_postinstrm () { + glib-compile-schemas $D${datadir}/glib-2.0/schemas +} + +python populate_packages_append () { + pkg = d.getVar('PN', True) + bb.note("adding gsettings postinst scripts to %s" % pkg) + + postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('gsettings_postinstrm', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + bb.note("adding gsettings postrm scripts to %s" % pkg) + + postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += d.getVar('gsettings_postinstrm', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) +} diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass new file mode 100644 index 0000000000..e32f98dcfc --- /dev/null +++ b/meta/classes/gtk-doc.bbclass @@ -0,0 +1,25 @@ +# Helper class to pull in the right gtk-doc dependencies and disable +# gtk-doc. +# +# Long-term it would be great if this class could be toggled between +# gtk-doc-stub-native and the real gtk-doc-native, which would enable +# re-generation of documentation. For now, we'll make do with this which +# packages up any existing documentation (so from tarball builds). + +# The documentation directory, where the infrastructure will be copied. +# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S. +GTKDOC_DOCDIR ?= "${S}" + +DEPENDS_append = " gtk-doc-stub-native" + +EXTRA_OECONF_append = "\ + --disable-gtk-doc \ + --disable-gtk-doc-html \ + --disable-gtk-doc-pdf \ +" + +do_configure_prepend () { + ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} ) +} + +inherit pkgconfig diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass new file mode 100644 index 0000000000..789fa38a16 --- /dev/null +++ b/meta/classes/gtk-icon-cache.bbclass @@ -0,0 +1,62 @@ +FILES_${PN} += "${datadir}/icons/hicolor" + +DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-update-icon-cache-native" + +gtk_icon_cache_postinst() { +if [ "x$D" != "x" ]; then + $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \ + base_libdir=${base_libdir} +else + + # Update the pixbuf loaders in case they haven't been registered yet + GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache + + for icondir in /usr/share/icons/* ; do + if [ -d $icondir ] ; then + gtk-update-icon-cache -fqt $icondir + fi + done +fi +} + +gtk_icon_cache_postrm() { +if [ "x$D" != "x" ]; then + $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \ + base_libdir=${base_libdir} +else + for icondir in /usr/share/icons/* ; do + if [ -d $icondir ] ; then + gtk-update-icon-cache -qt $icondir + fi + done +fi +} + +python populate_packages_append () { + packages = d.getVar('PACKAGES', True).split() + pkgdest = d.getVar('PKGDEST', True) + + for pkg in packages: + icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True)) + if not os.path.exists(icon_dir): + continue + + bb.note("adding hicolor-icon-theme dependency to %s" % pkg) + rdepends = ' ' + d.getVar('MLPREFIX') + "hicolor-icon-theme" + d.appendVar('RDEPENDS_%s' % pkg, rdepends) + + bb.note("adding 
gtk-icon-cache postinst and postrm scripts to %s" % pkg) + + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('gtk_icon_cache_postinst', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + postrm = d.getVar('pkg_postrm_%s' % pkg, True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += d.getVar('gtk_icon_cache_postrm', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) +} + diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass new file mode 100644 index 0000000000..5b45149080 --- /dev/null +++ b/meta/classes/gtk-immodules-cache.bbclass @@ -0,0 +1,83 @@ +# This class will update the inputmethod module cache for virtual keyboards +# +# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules + +DEPENDS =+ "qemu-native" + +inherit qemu + +GTKIMMODULES_PACKAGES ?= "${PN}" + +gtk_immodule_cache_postinst() { +if [ "x$D" != "x" ]; then + for maj_ver in 2 3; do + if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then + IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so) + ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \ + $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null && + sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules + + [ $? -ne 0 ] && exit 1 + fi + done + + exit 0 +fi +if [ ! -z `which gtk-query-immodules-2.0` ]; then + gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules +fi +if [ ! -z `which gtk-query-immodules-3.0` ]; then + gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules +fi +} + +gtk_immodule_cache_postrm() { +if [ "x$D" != "x" ]; then + for maj_ver in 2 3; do + if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then + IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so) + ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \ + $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null && + sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules + + [ $? -ne 0 ] && exit 1 + fi + done + + exit 0 +fi +if [ ! -z `which gtk-query-immodules-2.0` ]; then + gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules +fi +if [ ! 
-z `which gtk-query-immodules-3.0` ]; then + gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules +fi +} + +python populate_packages_append () { + gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split() + + for pkg in gtkimmodules_pkgs: + bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg) + + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('gtk_immodule_cache_postinst', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + postrm = d.getVar('pkg_postrm_%s' % pkg, True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += d.getVar('gtk_immodule_cache_postrm', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) +} + +python __anonymous() { + if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): + gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES') + if not gtkimmodules_check: + bb_filename = d.getVar('FILE') + raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename) +} + diff --git a/meta/classes/gummiboot.bbclass b/meta/classes/gummiboot.bbclass new file mode 100644 index 0000000000..dae19775c3 --- /dev/null +++ b/meta/classes/gummiboot.bbclass @@ -0,0 +1,114 @@ +# Copyright (C) 2014 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + +# gummiboot.bbclass - equivalent of grub-efi.bbclass +# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi +# (images built by bootimage.bbclass or boot-directdisk.bbclass) + +do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy" +do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy" + +EFIDIR = "/EFI/BOOT" + +GUMMIBOOT_CFG ?= "${S}/loader.conf" +GUMMIBOOT_ENTRIES ?= "" +GUMMIBOOT_TIMEOUT ?= "10" + +efi_populate() { + DEST=$1 + + EFI_IMAGE="gummibootia32.efi" + DEST_EFI_IMAGE="bootia32.efi" + if [ "${TARGET_ARCH}" = "x86_64" ]; then + EFI_IMAGE="gummibootx64.efi" + DEST_EFI_IMAGE="bootx64.efi" + fi + + install -d ${DEST}${EFIDIR} + # gummiboot requires these paths for configuration files + # they are not customizable so no point in new vars + install -d ${DEST}/loader + install -d ${DEST}/loader/entries + install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE} + install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf + for i in ${GUMMIBOOT_ENTRIES}; do + install -m 0644 ${i} ${DEST}/loader/entries + done +} + +efi_iso_populate() { + iso_dir=$1 + efi_populate $iso_dir + mkdir -p ${EFIIMGDIR}/${EFIDIR} + cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR} + cp $iso_dir/vmlinuz ${EFIIMGDIR} + echo "${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh + if [ -f "$iso_dir/initrd" ] ; then + cp $iso_dir/initrd ${EFIIMGDIR} + fi +} + +efi_hddimg_populate() { + efi_populate $1 +} + +python build_efi_cfg() { + s = d.getVar("S", True) + labels = d.getVar('LABELS', True) + if not labels: + bb.debug(1, "LABELS not defined, nothing to do") + return + + if labels == []: + bb.debug(1, "No labels, nothing to do") + return + + cfile = d.getVar('GUMMIBOOT_CFG', True) + try: + cfgfile = open(cfile, 'w') + except OSError: + raise bb.build.funcFailed('Unable to open %s' % (cfile)) + + cfgfile.write('# Automatically created by OE\n') + cfgfile.write('default %s\n' % (labels.split()[0])) + timeout = d.getVar('GUMMIBOOT_TIMEOUT', True) + if timeout: + cfgfile.write('timeout %s\n' % timeout) + else: + cfgfile.write('timeout 10\n') + cfgfile.close() + + for label in labels.split(): + 
localdata = d.createCopy() + + overrides = localdata.getVar('OVERRIDES', True) + if not overrides: + raise bb.build.FuncFailed('OVERRIDES not defined') + + entryfile = "%s/%s.conf" % (s, label) + d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile) + try: + entrycfg = open(entryfile, "w") + except OSError: + raise bb.build.funcFailed('Unable to open %s' % (entryfile)) + localdata.setVar('OVERRIDES', label + ':' + overrides) + bb.data.update_data(localdata) + + entrycfg.write('title %s\n' % label) + entrycfg.write('linux /vmlinuz\n') + + append = localdata.getVar('APPEND', True) + initrd = localdata.getVar('INITRD', True) + + if initrd: + entrycfg.write('initrd /initrd\n') + lb = label + if label == "install": + lb = "install-efi" + entrycfg.write('options LABEL=%s ' % lb) + if append: + entrycfg.write('%s' % append) + entrycfg.write('\n') + entrycfg.close() +} diff --git a/meta/classes/gzipnative.bbclass b/meta/classes/gzipnative.bbclass new file mode 100644 index 0000000000..326cbbb6f6 --- /dev/null +++ b/meta/classes/gzipnative.bbclass @@ -0,0 +1,5 @@ +EXTRANATIVEPATH += "pigz-native gzip-native" +DEPENDS += "gzip-native" + +# tar may get run by do_unpack or do_populate_lic which could call gzip +do_unpack[depends] += "gzip-native:do_populate_sysroot" diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass new file mode 100644 index 0000000000..2f9e3cf8ef --- /dev/null +++ b/meta/classes/icecc.bbclass @@ -0,0 +1,332 @@ +# IceCream distributed compiling support +# +# Stages directories with symlinks from gcc/g++ to icecc, for both +# native and cross compilers. Depending on each configure or compile, +# the directories are added at the head of the PATH list and ICECC_CXX +# and ICEC_CC are set. +# +# For the cross compiler, creates a tar.gz of our toolchain and sets +# ICECC_VERSION accordingly. +# +# The class now handles all 3 different compile 'stages' (i.e native ,cross-kernel and target) creating the +# necessary environment tar.gz file to be used by the remote machines. +# It also supports meta-toolchain generation +# +# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which' +# but nothing is sure ;) +# +# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user +# or the default one provided by icecc-create-env.bb will be used +# (NOTE that this is a modified version of the script need it and *not the one that comes with icecc* +# +# User can specify if specific packages or packages belonging to class should not use icecc to distribute +# compile jobs to remote machines, but handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL +# with the appropriate values in local.conf. In addition the user can force to enable icecc for packages +# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL. +# +######################################################################################### +#Error checking is kept to minimum so double check any parameters you pass to the class +########################################################################################### + +BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC" + +ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env" + +def icecc_dep_prepend(d): + # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. 
Whether or not + # we need that built is the responsibility of the patch function / class, not + # the application. + if not d.getVar('INHIBIT_DEFAULT_DEPS'): + return "icecc-create-env-native" + return "" + +DEPENDS_prepend += "${@icecc_dep_prepend(d)} " + +def get_cross_kernel_cc(bb,d): + kernel_cc = d.getVar('KERNEL_CC') + + # evaluate the expression by the shell if necessary + if '`' in kernel_cc or '$(' in kernel_cc: + kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1] + + kernel_cc = d.expand(kernel_cc) + kernel_cc = kernel_cc.replace('ccache', '').strip() + kernel_cc = kernel_cc.split(' ')[0] + kernel_cc = kernel_cc.strip() + return kernel_cc + +def get_icecc(d): + return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc") + +def create_path(compilers, bb, d): + """ + Create Symlinks for the icecc in the staging directory + """ + staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice") + if icc_is_kernel(bb, d): + staging += "-kernel" + + #check if the icecc path is set by the user + icecc = get_icecc(d) + + # Create the dir if necessary + try: + os.stat(staging) + except: + try: + os.makedirs(staging) + except: + pass + + for compiler in compilers: + gcc_path = os.path.join(staging, compiler) + try: + os.stat(gcc_path) + except: + try: + os.symlink(icecc, gcc_path) + except: + pass + + return staging + +def use_icc(bb,d): + if d.getVar('ICECC_DISABLED') == "1": + # don't even try it, when explicitly disabled + return "no" + + # allarch recipes don't use compiler + if icc_is_allarch(bb, d): + return "no" + + pn = d.getVar('PN', True) + + system_class_blacklist = [] + user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split() + package_class_blacklist = system_class_blacklist + user_class_blacklist + + for black in package_class_blacklist: + if bb.data.inherits_class(black, d): + bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black)) + return "no" + + # "system" recipe blacklist contains a list of packages that can not distribute compile tasks + # for one reason or the other + # this is the old list (which doesn't seem to be valid anymore, because I was able to build + # all these with icecc enabled) + # system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ] + # when adding new entry, please document why (how it failed) so that we can re-evaluate it later + # e.g. 
when there is new version + system_package_blacklist = [] + user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split() + user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split() + package_blacklist = system_package_blacklist + user_package_blacklist + + if pn in package_blacklist: + bb.debug(1, "%s: found in blacklist, disable icecc" % pn) + return "no" + + if pn in user_package_whitelist: + bb.debug(1, "%s: found in whitelist, enable icecc" % pn) + return "yes" + + if d.getVar('PARALLEL_MAKE') == "": + bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn) + return "no" + + return "yes" + +def icc_is_allarch(bb, d): + return d.getVar("PACKAGE_ARCH") == "all" + +def icc_is_kernel(bb, d): + return \ + bb.data.inherits_class("kernel", d); + +def icc_is_native(bb, d): + return \ + bb.data.inherits_class("cross", d) or \ + bb.data.inherits_class("native", d); + +# Don't pollute allarch signatures with TARGET_FPU +icc_version[vardepsexclude] += "TARGET_FPU" +def icc_version(bb, d): + if use_icc(bb, d) == "no": + return "" + + parallel = d.getVar('ICECC_PARALLEL_MAKE') or "" + if not d.getVar('PARALLEL_MAKE') == "" and parallel: + d.setVar("PARALLEL_MAKE", parallel) + + if icc_is_native(bb, d): + archive_name = "local-host-env" + elif d.expand('${HOST_PREFIX}') == "": + bb.fatal(d.expand("${PN}"), " NULL prefix") + else: + prefix = d.expand('${HOST_PREFIX}' ) + distro = d.expand('${DISTRO}') + target_sys = d.expand('${TARGET_SYS}') + float = d.getVar('TARGET_FPU') or "hard" + archive_name = prefix + distro + "-" + target_sys + "-" + float + if icc_is_kernel(bb, d): + archive_name += "-kernel" + + import socket + ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}') + tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz') + + return tar_file + +def icc_path(bb,d): + if use_icc(bb, d) == "no": + # don't create unnecessary directories when icecc is disabled + return + + if icc_is_kernel(bb, d): + return create_path( [get_cross_kernel_cc(bb,d), ], bb, d) + + else: + prefix = d.expand('${HOST_PREFIX}') + return create_path( [prefix+"gcc", prefix+"g++"], bb, d) + +def icc_get_external_tool(bb, d, tool): + external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}') + target_prefix = d.expand('${TARGET_PREFIX}') + return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool)) + +# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN +icc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN" +def icc_get_tool(bb, d, tool): + if icc_is_native(bb, d): + return bb.utils.which(os.getenv("PATH"), tool) + elif icc_is_kernel(bb, d): + return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d)) + else: + ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}') + target_sys = d.expand('${TARGET_SYS}') + tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool)) + if os.path.isfile(tool_bin): + return tool_bin + else: + external_tool_bin = icc_get_external_tool(bb, d, tool) + if os.path.isfile(external_tool_bin): + return external_tool_bin + else: + return "" + +def icc_get_and_check_tool(bb, d, tool): + # Check that g++ or gcc is not a symbolic link to icecc binary in + # PATH or icecc-create-env script will silently create an invalid + # compiler environment package. 
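+ # For example (hypothetical host setup): if /usr/bin/gcc in PATH is itself a
+ # symlink to the icecc binary, icecc-create-env would only capture icecc, so
+ # emit an error and fall back to returning "" instead.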
+ t = icc_get_tool(bb, d, tool) + if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d): + bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d))) + return "" + else: + return t + +wait_for_file() { + local TIME_ELAPSED=0 + local FILE_TO_TEST=$1 + local TIMEOUT=$2 + until [ -f "$FILE_TO_TEST" ] + do + TIME_ELAPSED=`expr $TIME_ELAPSED + 1` + if [ $TIME_ELAPSED -gt $TIMEOUT ] + then + return 1 + fi + sleep 1 + done +} + +def set_icecc_env(): + # dummy python version of set_icecc_env + return + +set_icecc_env() { + if [ "${@use_icc(bb, d)}" = "no" ] + then + return + fi + ICECC_VERSION="${@icc_version(bb, d)}" + if [ "x${ICECC_VERSION}" = "x" ] + then + bbwarn "Cannot use icecc: could not get ICECC_VERSION" + return + fi + + ICE_PATH="${@icc_path(bb, d)}" + if [ "x${ICE_PATH}" = "x" ] + then + bbwarn "Cannot use icecc: could not get ICE_PATH" + return + fi + + ICECC_CC="${@icc_get_and_check_tool(bb, d, "gcc")}" + ICECC_CXX="${@icc_get_and_check_tool(bb, d, "g++")}" + # cannot use icc_get_and_check_tool here because it assumes as without target_sys prefix + ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}" + if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ] + then + bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX" + return + fi + + ICE_VERSION=`$ICECC_CC -dumpversion` + ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"` + if [ ! -x "${ICECC_ENV_EXEC}" ] + then + bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC" + return + fi + + ICECC_AS="`${ICECC_CC} -print-prog-name=as`" + # for target recipes should return something like: + # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as + # and just "as" for native, if it returns "as" in current directory (for whatever reason) use "as" from PATH + if [ "`dirname "${ICECC_AS}"`" = "." ] + then + ICECC_AS="${ICECC_WHICH_AS}" + fi + + if [ ! -f "${ICECC_VERSION}.done" ] + then + mkdir -p "`dirname "${ICECC_VERSION}"`" + + # the ICECC_VERSION generation step must be locked by a mutex + # in order to prevent race conditions + if flock -n "${ICECC_VERSION}.lock" \ + ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}" + then + touch "${ICECC_VERSION}.done" + elif [ ! 
wait_for_file "${ICECC_VERSION}.done" 30 ] + then + # locking failed so wait for ${ICECC_VERSION}.done to appear + bbwarn "Timeout waiting for ${ICECC_VERSION}.done" + return + fi + fi + + export ICECC_VERSION ICECC_CC ICECC_CXX + export PATH="$ICE_PATH:$PATH" + export CCACHE_PATH="$PATH" + + bbnote "Using icecc" +} + +do_configure_prepend() { + set_icecc_env +} + +do_compile_prepend() { + set_icecc_env +} + +do_compile_kernelmodules_prepend() { + set_icecc_env +} + +do_install_prepend() { + set_icecc_env +} diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass new file mode 100644 index 0000000000..7b770fb353 --- /dev/null +++ b/meta/classes/image-live.bbclass @@ -0,0 +1,18 @@ + +AUTO_SYSLINUXCFG = "1" +INITRD_IMAGE ?= "core-image-minimal-initramfs" +INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz" +SYSLINUX_ROOT = "root=/dev/ram0" +SYSLINUX_TIMEOUT ?= "10" +SYSLINUX_LABELS ?= "boot install" +LABELS_append = " ${SYSLINUX_LABELS} " + +ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3" + +do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs" +do_bootimg[depends] += "${PN}:do_rootfs" + +inherit bootimg + +IMAGE_TYPEDEP_live = "ext3" +IMAGE_TYPES_MASKED += "live" diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass new file mode 100644 index 0000000000..c455a8e2d4 --- /dev/null +++ b/meta/classes/image-mklibs.bbclass @@ -0,0 +1,71 @@ +do_rootfs[depends] += "mklibs-native:do_populate_sysroot" + +IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; " + +mklibs_optimize_image_doit() { + rm -rf ${WORKDIR}/mklibs + mkdir -p ${WORKDIR}/mklibs/dest + cd ${IMAGE_ROOTFS} + du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt + for i in `find .`; do file $i; done \ + | grep ELF \ + | grep "LSB *executable" \ + | grep "dynamically linked" \ + | sed "s/:.*//" \ + | sed "s+^\./++" \ + > ${WORKDIR}/mklibs/executables.list + + case ${TARGET_ARCH} in + powerpc | mips | mipsel | microblaze ) + dynamic_loader="${base_libdir}/ld.so.1" + ;; + powerpc64) + dynamic_loader="${base_libdir}/ld64.so.1" + ;; + x86_64) + dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2" + ;; + i586 ) + dynamic_loader="${base_libdir}/ld-linux.so.2" + ;; + arm ) + dynamic_loader="${base_libdir}/ld-linux.so.3" + ;; + * ) + dynamic_loader="/unknown_dynamic_linker" + ;; + esac + + mklibs -v \ + --ldlib ${dynamic_loader} \ + --libdir ${baselib} \ + --sysroot ${PKG_CONFIG_SYSROOT_DIR} \ + --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \ + --root ${IMAGE_ROOTFS} \ + --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \ + -d ${WORKDIR}/mklibs/dest \ + `cat ${WORKDIR}/mklibs/executables.list` + + cd ${WORKDIR}/mklibs/dest + for i in * + do + cp $i `find ${IMAGE_ROOTFS} -name $i` + done + + cd ${IMAGE_ROOTFS} + du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt + + echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt` + echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt` +} + +mklibs_optimize_image() { + for img in ${MKLIBS_OPTIMIZED_IMAGES} + do + if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ] + then + mklibs_optimize_image_doit + break + fi + done +} diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass new file mode 100644 index 0000000000..d4bb3aec39 --- /dev/null +++ b/meta/classes/image-prelink.bbclass @@ -0,0 +1,33 @@ +do_rootfs[depends] += "prelink-native:do_populate_sysroot" + +IMAGE_PREPROCESS_COMMAND += "prelink_image; " + +prelink_image () { +# export 
PSEUDO_DEBUG=4 +# /bin/env | /bin/grep PSEUDO +# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" +# echo "LD_PRELOAD=$LD_PRELOAD" + + pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'` + echo "Size before prelinking $pre_prelink_size." + + # We need a prelink conf on the filesystem, add one if it's missing + if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then + cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \ + ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf + dummy_prelink_conf=true; + else + dummy_prelink_conf=false; + fi + + # prelink! + ${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf + + # Remove the prelink.conf if we had to add it. + if [ "$dummy_prelink_conf" = "true" ]; then + rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf + fi + + pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'` + echo "Size after prelinking $pre_prelink_size." +} diff --git a/meta/classes/image-swab.bbclass b/meta/classes/image-swab.bbclass new file mode 100644 index 0000000000..89318560db --- /dev/null +++ b/meta/classes/image-swab.bbclass @@ -0,0 +1,94 @@ +HOST_DATA ?= "${TMPDIR}/host-contamination-data/" +SWABBER_REPORT ?= "${LOG_DIR}/swabber/" +SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs" +TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}" +TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}" + +SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}" +BB_DEFAULT_TASK = "generate_swabber_report" + +# Several recipes don't build with parallel make when run under strace +# Ideally these should be fixed but as a temporary measure disable parallel +# builds for troublesome recipes +PARALLEL_MAKE_pn-openssl = "" +PARALLEL_MAKE_pn-glibc = "" +PARALLEL_MAKE_pn-glib-2.0 = "" +PARALLEL_MAKE_pn-libxml2 = "" +PARALLEL_MAKE_pn-readline = "" +PARALLEL_MAKE_pn-util-linux = "" +PARALLEL_MAKE_pn-binutils = "" +PARALLEL_MAKE_pn-bison = "" +PARALLEL_MAKE_pn-cmake = "" +PARALLEL_MAKE_pn-elfutils = "" +PARALLEL_MAKE_pn-gcc = "" +PARALLEL_MAKE_pn-gcc-runtime = "" +PARALLEL_MAKE_pn-m4 = "" +PARALLEL_MAKE_pn-opkg = "" +PARALLEL_MAKE_pn-pkgconfig = "" +PARALLEL_MAKE_pn-prelink = "" +PARALLEL_MAKE_pn-rpm = "" +PARALLEL_MAKE_pn-tcl = "" +PARALLEL_MAKE_pn-beecrypt = "" +PARALLEL_MAKE_pn-curl = "" +PARALLEL_MAKE_pn-gmp = "" +PARALLEL_MAKE_pn-libmpc = "" +PARALLEL_MAKE_pn-libxslt = "" +PARALLEL_MAKE_pn-lzo = "" +PARALLEL_MAKE_pn-popt = "" +PARALLEL_MAKE_pn-linux-wrs = "" +PARALLEL_MAKE_pn-libgcrypt = "" +PARALLEL_MAKE_pn-gpgme = "" +PARALLEL_MAKE_pn-udev = "" +PARALLEL_MAKE_pn-gnutls = "" + +python() { + # NOTE: It might be useful to detect host infection on native and cross + # packages but as it turns out to be pretty hard to do this for all native + # and cross packages which aren't swabber-native or one of its dependencies + # I have ignored them for now... 
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d): + deps = (d.getVarFlag('do_setscene', 'depends') or "").split() + deps.append('strace-native:do_populate_sysroot') + d.setVarFlag('do_setscene', 'depends', " ".join(deps)) + logdir = d.expand("${TRACE_LOGDIR}") + bb.utils.mkdirhier(logdir) + else: + d.setVar('STRACEFUNC', '') +} + +STRACEPID = "${@os.getpid()}" +STRACEFUNC = "imageswab_attachstrace" + +do_configure[prefuncs] += "${STRACEFUNC}" +do_compile[prefuncs] += "${STRACEFUNC}" + +imageswab_attachstrace () { + STRACE=`which strace` + + if [ -x "$STRACE" ]; then + swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log" + fi +} + +do_generate_swabber_report () { + + update_distro ${HOST_DATA} + + # Swabber can't create the directory for us + mkdir -p ${SWABBER_REPORT} + + REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y` + + if [ `which ccache` ] ; then + CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )` + fi + + if [ "$(ls -A ${HOST_DATA})" ]; then + echo "Generating swabber report" + swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR} + else + echo "No host data, cannot generate swabber report." + fi +} +addtask generate_swabber_report after do_${SWAB_ORIG_TASK} +do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot" diff --git a/meta/classes/image-vmdk.bbclass b/meta/classes/image-vmdk.bbclass new file mode 100644 index 0000000000..77b7facd41 --- /dev/null +++ b/meta/classes/image-vmdk.bbclass @@ -0,0 +1,35 @@ + +#NOISO = "1" + +SYSLINUX_ROOT ?= "root=/dev/sda2" +SYSLINUX_PROMPT ?= "0" +SYSLINUX_TIMEOUT ?= "10" +SYSLINUX_LABELS = "boot" +LABELS_append = " ${SYSLINUX_LABELS} " + +# need to define the dependency and the ROOTFS for directdisk +do_bootdirectdisk[depends] += "${PN}:do_rootfs" +ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3" + +# creating VMDK relies on having a live hddimg so ensure we +# inherit it here. 
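+# To get a .vmdk a hypothetical local.conf would typically just request the
+# image type, e.g. (illustrative): IMAGE_FSTYPES += "vmdk"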
+#inherit image-live +inherit boot-directdisk + +IMAGE_TYPEDEP_vmdk = "ext3" +IMAGE_TYPES_MASKED += "vmdk" + +create_vmdk_image () { + qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk + ln -sf ${IMAGE_NAME}.vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vmdk +} + +python do_vmdkimg() { + bb.build.exec_func('create_vmdk_image', d) +} + +#addtask vmdkimg after do_bootimg before do_build +addtask vmdkimg after do_bootdirectdisk before do_build + +do_vmdkimg[depends] += "qemu-native:do_populate_sysroot" + diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass new file mode 100644 index 0000000000..1c0fda7d60 --- /dev/null +++ b/meta/classes/image.bbclass @@ -0,0 +1,448 @@ +inherit rootfs_${IMAGE_PKGTYPE} + +inherit populate_sdk_base + +TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}" +TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}" +POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; " + +inherit gzipnative + +LICENSE = "MIT" +PACKAGES = "" +DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross" +RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}" +RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}" + +INHIBIT_DEFAULT_DEPS = "1" + +TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}" +inherit ${TESTIMAGECLASS} + +# IMAGE_FEATURES may contain any available package group +IMAGE_FEATURES ?= "" +IMAGE_FEATURES[type] = "list" +IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs" + +# rootfs bootstrap install +ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}" + +# packages to install from features +FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}" +FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}" + +# Define some very basic feature package groups +FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}" +SPLASH ?= "psplash" +FEATURE_PACKAGES_splash = "${SPLASH}" + +IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}' + +def check_image_features(d): + valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split() + valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys() + for var in d: + if var.startswith("PACKAGE_GROUP_"): + bb.warn("PACKAGE_GROUP is deprecated, please use FEATURE_PACKAGES instead") + valid_features.append(var[14:]) + elif var.startswith("FEATURE_PACKAGES_"): + valid_features.append(var[17:]) + valid_features.sort() + + features = set(oe.data.typed_value('IMAGE_FEATURES', d)) + for feature in features: + if feature not in valid_features: + bb.fatal("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features))) + +IMAGE_INSTALL ?= "" +IMAGE_INSTALL[type] = "list" +export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}" +PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}" + +# Images are generally built explicitly, do not need to be part of world. 
+EXCLUDE_FROM_WORLD = "1" + +USE_DEVFS ?= "1" + +PID = "${@os.getpid()}" + +PACKAGE_ARCH = "${MACHINE_ARCH}" + +LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot" +LDCONFIGDEPEND_libc-uclibc = "" +LDCONFIGDEPEND_libc-musl = "" + +do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}" +do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot" +do_rootfs[recrdeptask] += "do_packagedata" + +def command_variables(d): + return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND', + 'IMAGE_PREPROCESS_COMMAND','ROOTFS_POSTPROCESS_COMMAND','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS', + 'RPM_POSTPROCESS_COMMANDS'] + +python () { + variables = command_variables(d) + for var in variables: + if d.getVar(var): + d.setVarFlag(var, 'func', '1') +} + +def rootfs_variables(d): + from oe.rootfs import variable_depends + variables = ['IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPEDEP_','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE', + 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS','SDK_OS', + 'SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT','SDKTARGETSYSROOT','MULTILIBRE_ALLOW_REP', + 'MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS', + 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','BUILDNAME','USE_DEVFS', + 'STAGING_KERNEL_DIR','COMPRESSIONTYPES'] + variables.extend(command_variables(d)) + variables.extend(variable_depends(d)) + return " ".join(variables) + +do_rootfs[vardeps] += "${@rootfs_variables(d)}" + +do_build[depends] += "virtual/kernel:do_deploy" + +def build_live(d): + if bb.utils.contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg + d.setVar('NOISO', bb.utils.contains('IMAGE_FSTYPES', "iso", "0", "1", d)) + d.setVar('NOHDD', bb.utils.contains('IMAGE_FSTYPES', "hddimg", "0", "1", d)) + if d.getVar('NOISO', True) == "0" or d.getVar('NOHDD', True) == "0": + return "image-live" + return "" + return "image-live" + +IMAGE_TYPE_live = "${@build_live(d)}" + +inherit ${IMAGE_TYPE_live} +IMAGE_TYPE_vmdk = '${@bb.utils.contains("IMAGE_FSTYPES", "vmdk", "image-vmdk", "", d)}' +inherit ${IMAGE_TYPE_vmdk} + +python () { + deps = " " + imagetypes_getdepends(d) + d.appendVarFlag('do_rootfs', 'depends', deps) + + deps = "" + for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split(): + deps += " %s:do_populate_sysroot" % dep + d.appendVarFlag('do_build', 'depends', deps) + + #process IMAGE_FEATURES, we must do this before runtime_mapping_rename + #Check for replaces image features + features = set(oe.data.typed_value('IMAGE_FEATURES', d)) + remain_features = features.copy() + for feature in features: + replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split()) + remain_features -= replaces + + #Check for 
conflict image features + for feature in remain_features: + conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split()) + temp = conflicts & remain_features + if temp: + bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp)))) + + d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features))) + + check_image_features(d) + initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or "" + if initramfs_image != "": + d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True)) + d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_rootfs" % initramfs_image) +} + +IMAGE_CLASSES += "image_types" +inherit ${IMAGE_CLASSES} + +IMAGE_POSTPROCESS_COMMAND ?= "" +MACHINE_POSTPROCESS_COMMAND ?= "" +# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled +ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "ssh_allow_empty_password; ", "",d)}' +# Enable postinst logging if debug-tweaks is enabled +ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "postinst_enable_logging; ", "",d)}' +# Write manifest +IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest" +ROOTFS_POSTPROCESS_COMMAND =+ "write_image_manifest ; " +# Set default postinst log file +POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log" +# Set default target for systemd images +SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}' +ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}' + +# some default locales +IMAGE_LINGUAS ?= "de-de fr-fr en-gb" + +LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}" + +# Prefer image, but use the fallback files for lookups if the image ones +# aren't yet available. +PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}" + +do_rootfs[dirs] = "${TOPDIR}" +do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock" +do_rootfs[cleandirs] += "${S}" + +# Must call real_do_rootfs() from inside here, rather than as a separate +# task, so that we have a single fakeroot context for the whole process. +do_rootfs[umask] = "022" + +# A hook function to support read-only-rootfs IMAGE_FEATURES +# Currently, it only supports sysvinit system. +read_only_rootfs_hook () { + # Tweak the mount option and fs_passno for rootfs in fstab + sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab + + if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then + # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes + if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then + sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS + fi + # Run populate-volatile.sh at rootfs time to set up basic files + # and directories to support read-only rootfs. + if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then + ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh + fi + # If we're using openssh and the /etc/ssh directory has no pre-generated keys, + # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly + # and the keys under /var/run/ssh. 
+ if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then + if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then + echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh + echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh + else + echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh + echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh + fi + fi + fi + + if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then + # Update user database files so that services don't fail for a read-only systemd system + for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do + [ -e $conffile ] || continue + grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do + if [ "$type" = "u" ]; then + useradd_params="" + [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id" + [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment" + useradd_params="$useradd_params --system $name" + eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true + elif [ "$type" = "g" ]; then + groupadd_params="" + [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id" + groupadd_params="$groupadd_params --system $name" + eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true + fi + done + done + fi +} + +PACKAGE_EXCLUDE ??= "" +PACKAGE_EXCLUDE[type] = "list" + +python rootfs_process_ignore() { + excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split() + inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split() + inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split() + + d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs)) + d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs)) + + for pkg in excl_pkgs: + if pkg in inst_pkgs: + bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs)) + inst_pkgs.remove(pkg) + + if pkg in inst_attempt_pkgs: + bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs)) + inst_attempt_pkgs.remove(pkg) + + d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs)) + d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs)) +} +do_rootfs[prefuncs] += "rootfs_process_ignore" + +# We have to delay the runtime_mapping_rename until just before rootfs runs +# otherwise, the multilib renaming could step in and squash any fixups that +# may have occurred. +python rootfs_runtime_mapping() { + pn = d.getVar('PN', True) + runtime_mapping_rename("PACKAGE_INSTALL", pn, d) + runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d) + runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d) +} +do_rootfs[prefuncs] += "rootfs_runtime_mapping" + +fakeroot python do_rootfs () { + from oe.rootfs import create_rootfs + from oe.image import create_image + from oe.manifest import create_manifest + + # generate the initial manifest + create_manifest(d) + + # generate rootfs + create_rootfs(d) + + # generate final images + create_image(d) +} + +insert_feed_uris () { + + echo "Building feeds for [${DISTRO}].." 
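+ # Each FEED_URIS entry is assumed to take the form "<name>##<uri>",
+ # e.g. (illustrative) updates##http://feeds.example.com/ipk ;
+ # the name and URI are split on "##" below.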
+ + for line in ${FEED_URIS} + do + # strip leading and trailing spaces/tabs, then split into name and uri + line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`" + feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`" + feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`" + + echo "Added $feed_name feed with URL $feed_uri" + + # insert new feed-sources + echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf + done +} + +MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|" +MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py" +MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib" + +# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES. +zap_empty_root_password () { + if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then + sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow + elif [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then + sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd + fi +} + +# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string +ssh_allow_empty_password () { + if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then + sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config + sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config + fi + + if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then + if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then + if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then + sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear + fi + else + printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear + fi + fi + + if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then + sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* + fi +} + +# Disable DNS lookups, the SSH_DISABLE_DNS_LOOKUP can be overridden to allow +# distros to choose not to take this change +SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; " +ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}" +ssh_disable_dns_lookup () { + if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then + sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config + fi +} + +# Enable postinst logging if debug-tweaks is enabled +postinst_enable_logging () { + mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default + echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst + echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst +} + +# Modify systemd default target +set_systemd_default_target () { + if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then + ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target + fi +} + +# Turn any symbolic /sbin/init link into a file +remove_init_link () { + if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then + LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init` + rm ${IMAGE_ROOTFS}/sbin/init + cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init + fi +} + +make_zimage_symlink_relative () { + if [ -L 
${IMAGE_ROOTFS}/boot/zImage ]; then + (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done) + fi +} + +python write_image_manifest () { + from oe.rootfs import image_list_installed_packages + with open(d.getVar('IMAGE_MANIFEST', True), 'w+') as image_manifest: + image_manifest.write(image_list_installed_packages(d, 'ver')) +} + +# Make login manager(s) enable automatic login. +# Useful for devices where we do not want to log in at all (e.g. phones) +set_image_autologin () { + sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin +} + +# Can be use to create /etc/timestamp during image construction to give a reasonably +# sane default time setting +rootfs_update_timestamp () { + date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp +} + +# Prevent X from being started +rootfs_no_x_startup () { + if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then + chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm + fi +} + +rootfs_trim_schemas () { + for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas + do + # Need this in case no files exist + if [ -e $schema ]; then + oe-trim-schemas $schema > $schema.new + mv $schema.new $schema + fi + done +} + +# Make any absolute links in a sysroot relative +rootfs_sysroot_relativelinks () { + sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT} +} + +do_fetch[noexec] = "1" +do_unpack[noexec] = "1" +do_patch[noexec] = "1" +do_configure[noexec] = "1" +do_compile[noexec] = "1" +do_install[noexec] = "1" +do_populate_sysroot[noexec] = "1" +do_package[noexec] = "1" +do_package_qa[noexec] = "1" +do_packagedata[noexec] = "1" +do_package_write_ipk[noexec] = "1" +do_package_write_deb[noexec] = "1" +do_package_write_rpm[noexec] = "1" + +addtask rootfs before do_build +# Allow the kernel to be repacked with the initramfs and boot image file as a single file +do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs" +do_bundle_initramfs[nostamp] = "1" +do_bundle_initramfs[noexec] = "1" +do_bundle_initramfs () { + : +} +addtask bundle_initramfs after do_rootfs diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass new file mode 100644 index 0000000000..c7da4c3ed8 --- /dev/null +++ b/meta/classes/image_types.bbclass @@ -0,0 +1,163 @@ + +# The default aligment of the size of the rootfs is set to 1KiB. In case +# you're using the SD card emulation of a QEMU system simulator you may +# set this value to 2048 (2MiB alignment). +IMAGE_ROOTFS_ALIGNMENT ?= "1" + +def imagetypes_getdepends(d): + def adddep(depstr, deps): + for i in (depstr or "").split(): + if i not in deps: + deps.append(i) + + deps = [] + ctypes = d.getVar('COMPRESSIONTYPES', True).split() + for type in (d.getVar('IMAGE_FSTYPES', True) or "").split(): + if type == "vmdk" or type == "live" or type == "iso" or type == "hddimg": + type = "ext3" + basetype = type + for ctype in ctypes: + if type.endswith("." + ctype): + basetype = type[:-len("." 
+ ctype)] + adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps) + break + adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps) + + depstr = "" + for dep in deps: + depstr += " " + dep + ":do_populate_sysroot" + return depstr + + +XZ_COMPRESSION_LEVEL ?= "-e -6" +XZ_INTEGRITY_CHECK ?= "crc32" +XZ_THREADS ?= "-T 0" + +JFFS2_SUM_EXTRA_ARGS ?= "" +IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 ${EXTRA_IMAGECMD}" + +IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}" + +oe_mkext234fs () { + fstype=$1 + extra_imagecmd="" + + if [ $# -gt 1 ]; then + shift + extra_imagecmd=$@ + fi + + # Create a sparse image block + dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype seek=$ROOTFS_SIZE count=0 bs=1k + mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype -d ${IMAGE_ROOTFS} +} + +IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}" +IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}" +IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}" + +IMAGE_CMD_btrfs () { + touch ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs + mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs +} + +IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend" +IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz" +IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo" +IMAGE_CMD_tar = "tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar -C ${IMAGE_ROOTFS} ." + +do_rootfs[cleandirs] += "${WORKDIR}/cpio_append" +IMAGE_CMD_cpio () { + (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio) + if [ ! -L ${IMAGE_ROOTFS}/init -a ! 
-e ${IMAGE_ROOTFS}/init ]; then + if [ -L ${IMAGE_ROOTFS}/sbin/init -o -e ${IMAGE_ROOTFS}/sbin/init ]; then + ln -sf /sbin/init ${WORKDIR}/cpio_append/init + else + touch ${WORKDIR}/cpio_append/init + fi + (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio) + fi +} + +ELF_KERNEL ?= "${STAGING_DIR_HOST}/usr/src/kernel/${KERNEL_IMAGETYPE}" +ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console=" + +IMAGE_CMD_elf () { + test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf + mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD} +} +IMAGE_TYPEDEP_elf = "cpio.gz" + +UBI_VOLNAME ?= "${MACHINE}-rootfs" + +IMAGE_CMD_ubi () { + echo \[ubifs\] > ubinize.cfg + echo mode=ubi >> ubinize.cfg + echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs >> ubinize.cfg + echo vol_id=0 >> ubinize.cfg + echo vol_type=dynamic >> ubinize.cfg + echo vol_name=${UBI_VOLNAME} >> ubinize.cfg + echo vol_flags=autoresize >> ubinize.cfg + mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS} + ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg +} +IMAGE_TYPEDEP_ubi = "ubifs" + +IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}" + +EXTRA_IMAGECMD = "" + +inherit siteinfo +JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}" +JFFS2_ERASEBLOCK ?= "0x40000" +EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers" + +# Change these if you want default mkfs behavior (i.e. 
create minimal inode number) +EXTRA_IMAGECMD_ext2 ?= "-i 4096" +EXTRA_IMAGECMD_ext3 ?= "-i 4096" +EXTRA_IMAGECMD_ext4 ?= "-i 4096" +EXTRA_IMAGECMD_btrfs ?= "" +EXTRA_IMAGECMD_elf ?= "" + +IMAGE_DEPENDS = "" +IMAGE_DEPENDS_jffs2 = "mtd-utils-native" +IMAGE_DEPENDS_cramfs = "util-linux-native" +IMAGE_DEPENDS_ext2 = "e2fsprogs-native" +IMAGE_DEPENDS_ext3 = "e2fsprogs-native" +IMAGE_DEPENDS_ext4 = "e2fsprogs-native" +IMAGE_DEPENDS_btrfs = "btrfs-tools-native" +IMAGE_DEPENDS_squashfs = "squashfs-tools-native" +IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native" +IMAGE_DEPENDS_squashfs-lzo = "squashfs-tools-native" +IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native" +IMAGE_DEPENDS_ubi = "mtd-utils-native" +IMAGE_DEPENDS_ubifs = "mtd-utils-native" + +# This variable is available to request which values are suitable for IMAGE_FSTYPES +IMAGE_TYPES = "jffs2 jffs2.sum cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs iso hddimg squashfs squashfs-xz squashfs-lzo ubi ubifs tar tar.gz tar.bz2 tar.xz tar.lz4 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 vmdk elf" + +COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum" +COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}" +COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.gz" +COMPRESS_CMD_bz2 = "bzip2 -f -k ${IMAGE_NAME}.rootfs.${type}" +COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.xz" +COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.lz4" +COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}.rootfs.${type} -o ${IMAGE_NAME}.rootfs.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}" +COMPRESS_DEPENDS_lzma = "xz-native" +COMPRESS_DEPENDS_gz = "" +COMPRESS_DEPENDS_bz2 = "" +COMPRESS_DEPENDS_xz = "xz-native" +COMPRESS_DEPENDS_lz4 = "lz4-native" +COMPRESS_DEPENDS_sum = "mtd-utils-native" + +RUNNABLE_IMAGE_TYPES ?= "ext2 ext3" +RUNNABLE_MACHINE_PATTERNS ?= "qemu" + +DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso" + +# Use IMAGE_EXTENSION_xxx to map image type 'xxx' with real image file extension name(s) for Hob +IMAGE_EXTENSION_live = "hddimg iso" + +# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES, +# images that will not be built at do_rootfs time: vmdk, hddimg, iso, etc. 
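+# (For example, image-live.bbclass and image-vmdk.bbclass above append
+# "live" and "vmdk" here.)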
+IMAGE_TYPES_MASKED ?= "" diff --git a/meta/classes/image_types_uboot.bbclass b/meta/classes/image_types_uboot.bbclass new file mode 100644 index 0000000000..07837b566c --- /dev/null +++ b/meta/classes/image_types_uboot.bbclass @@ -0,0 +1,23 @@ +inherit image_types kernel-arch + +oe_mkimage () { + mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \ + -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot +} + +COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot" + +COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native" +COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none" + +COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native" +COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip" + +COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native" +COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2" + +COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native" +COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma" + +IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot" + diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass new file mode 100644 index 0000000000..c6dea22618 --- /dev/null +++ b/meta/classes/insane.bbclass @@ -0,0 +1,1153 @@ +# BB Class inspired by ebuild.sh +# +# This class will test files after installation for certain +# security issues and other kind of issues. +# +# Checks we do: +# -Check the ownership and permissions +# -Check the RUNTIME path for the $TMPDIR +# -Check if .la files wrongly point to workdir +# -Check if .pc files wrongly point to workdir +# -Check if packages contains .debug directories or .so files +# where they should be in -dev or -dbg +# -Check if config.log contains traces to broken autoconf tests +# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link +# into exec_prefix +# -Check that scripts in base_[bindir|sbindir|libdir] do not reference +# files under exec_prefix + + +# unsafe-references-in-binaries requires prelink-rtld from +# prelink-native, but we don't want this DEPENDS for -native builds +QADEPENDS = "prelink-native" +QADEPENDS_class-native = "" +QADEPENDS_class-nativesdk = "" +QA_SANE = "True" + +# Elect whether a given type of error is a warning or error, they may +# have been set by other files. +WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \ + textrel already-stripped incompatible-license files-invalid \ + installed-vs-shipped compile-host-path install-host-path \ + pn-overrides infodir build-deps file-rdeps \ + " +ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \ + perms dep-cmp pkgvarcheck perm-config perm-line perm-link \ + split-strip packages-list pkgv-undefined var-undefined \ + version-going-backwards \ + " + +ALL_QA = "${WARN_QA} ${ERROR_QA}" + +UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot" + +# +# dictionary for elf headers +# +# feel free to add and correct. +# +# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit? 
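+# Each tuple below is roughly (ELF e_machine, OSABI, ABI version,
+# little-endian?, word size); the "arch" QA test compares these against the
+# ELF header of each packaged binary to catch binaries built for the wrong
+# target.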
+def package_qa_get_machine_dict(): + return { + "darwin9" : { + "arm" : (40, 0, 0, True, 32), + }, + "linux" : { + "aarch64" : (183, 0, 0, True, 64), + "aarch64_be" :(183, 0, 0, False, 64), + "arm" : (40, 97, 0, True, 32), + "armeb": (40, 97, 0, False, 32), + "powerpc": (20, 0, 0, False, 32), + "powerpc64": (21, 0, 0, False, 64), + "i386": ( 3, 0, 0, True, 32), + "i486": ( 3, 0, 0, True, 32), + "i586": ( 3, 0, 0, True, 32), + "i686": ( 3, 0, 0, True, 32), + "x86_64": (62, 0, 0, True, 64), + "ia64": (50, 0, 0, True, 64), + "alpha": (36902, 0, 0, True, 64), + "hppa": (15, 3, 0, False, 32), + "m68k": ( 4, 0, 0, False, 32), + "mips": ( 8, 0, 0, False, 32), + "mipsel": ( 8, 0, 0, True, 32), + "mips64": ( 8, 0, 0, False, 64), + "mips64el": ( 8, 0, 0, True, 64), + "s390": (22, 0, 0, False, 32), + "sh4": (42, 0, 0, True, 32), + "sparc": ( 2, 0, 0, False, 32), + "microblaze": (189, 0, 0, False, 32), + "microblazeel":(189, 0, 0, True, 32), + }, + "linux-uclibc" : { + "arm" : ( 40, 97, 0, True, 32), + "armeb": ( 40, 97, 0, False, 32), + "powerpc": ( 20, 0, 0, False, 32), + "i386": ( 3, 0, 0, True, 32), + "i486": ( 3, 0, 0, True, 32), + "i586": ( 3, 0, 0, True, 32), + "i686": ( 3, 0, 0, True, 32), + "x86_64": ( 62, 0, 0, True, 64), + "mips": ( 8, 0, 0, False, 32), + "mipsel": ( 8, 0, 0, True, 32), + "mips64": ( 8, 0, 0, False, 64), + "mips64el": ( 8, 0, 0, True, 64), + "avr32": (6317, 0, 0, False, 32), + "sh4": (42, 0, 0, True, 32), + + }, + "linux-musl" : { + "arm" : ( 40, 97, 0, True, 32), + "armeb": ( 40, 97, 0, False, 32), + "powerpc": ( 20, 0, 0, False, 32), + "i386": ( 3, 0, 0, True, 32), + "i486": ( 3, 0, 0, True, 32), + "i586": ( 3, 0, 0, True, 32), + "i686": ( 3, 0, 0, True, 32), + "x86_64": ( 62, 0, 0, True, 64), + "mips": ( 8, 0, 0, False, 32), + "mipsel": ( 8, 0, 0, True, 32), + "mips64": ( 8, 0, 0, False, 64), + "mips64el": ( 8, 0, 0, True, 64), + }, + "uclinux-uclibc" : { + "bfin": ( 106, 0, 0, True, 32), + }, + "linux-gnueabi" : { + "arm" : (40, 0, 0, True, 32), + "armeb" : (40, 0, 0, False, 32), + }, + "linux-musleabi" : { + "arm" : (40, 0, 0, True, 32), + "armeb" : (40, 0, 0, False, 32), + }, + "linux-uclibceabi" : { + "arm" : (40, 0, 0, True, 32), + "armeb" : (40, 0, 0, False, 32), + }, + "linux-gnuspe" : { + "powerpc": (20, 0, 0, False, 32), + }, + "linux-muslspe" : { + "powerpc": (20, 0, 0, False, 32), + }, + "linux-uclibcspe" : { + "powerpc": (20, 0, 0, False, 32), + }, + "linux-gnu" : { + "powerpc": (20, 0, 0, False, 32), + "sh4": (42, 0, 0, True, 32), + }, + "linux-gnux32" : { + "x86_64": (62, 0, 0, True, 32), + }, + "linux-gnun32" : { + "mips64": ( 8, 0, 0, False, 32), + "mips64el": ( 8, 0, 0, True, 32), + }, + } + + +def package_qa_clean_path(path,d): + """ Remove the common prefix from the path. 
In this case it is the TMPDIR""" + return path.replace(d.getVar('TMPDIR',True),"") + +def package_qa_write_error(type, error, d): + logfile = d.getVar('QA_LOGFILE', True) + if logfile: + p = d.getVar('P', True) + f = file( logfile, "a+") + print >> f, "%s: %s [%s]" % (p, error, type) + f.close() + +def package_qa_handle_error(error_class, error_msg, d): + package_qa_write_error(error_class, error_msg, d) + if error_class in (d.getVar("ERROR_QA", True) or "").split(): + bb.error("QA Issue: %s [%s]" % (error_msg, error_class)) + d.setVar("QA_SANE", False) + return False + elif error_class in (d.getVar("WARN_QA", True) or "").split(): + bb.warn("QA Issue: %s [%s]" % (error_msg, error_class)) + else: + bb.note("QA Issue: %s [%s]" % (error_msg, error_class)) + return True + +QAPATHTEST[libexec] = "package_qa_check_libexec" +def package_qa_check_libexec(path,name, d, elf, messages): + + # Skip the case where the default is explicitly /usr/libexec + libexec = d.getVar('libexecdir', True) + if libexec == "/usr/libexec": + return True + + if 'libexec' in path.split(os.path.sep): + messages["libexec"] = "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec) + return False + + return True + +QAPATHTEST[rpaths] = "package_qa_check_rpath" +def package_qa_check_rpath(file,name, d, elf, messages): + """ + Check for dangerous RPATHs + """ + if not elf: + return + + if os.path.islink(file): + return + + bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)] + + phdrs = elf.run_objdump("-p", d) + + import re + rpath_re = re.compile("\s+RPATH\s+(.*)") + for line in phdrs.split("\n"): + m = rpath_re.match(line) + if m: + rpath = m.group(1) + for dir in bad_dirs: + if dir in rpath: + messages["rpaths"] = "package %s contains bad RPATH %s in file %s" % (name, rpath, file) + +QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths" +def package_qa_check_useless_rpaths(file, name, d, elf, messages): + """ + Check for RPATHs that are useless but not dangerous + """ + def rpath_eq(a, b): + return os.path.normpath(a) == os.path.normpath(b) + + if not elf: + return + + if os.path.islink(file): + return + + libdir = d.getVar("libdir", True) + base_libdir = d.getVar("base_libdir", True) + + phdrs = elf.run_objdump("-p", d) + + import re + rpath_re = re.compile("\s+RPATH\s+(.*)") + for line in phdrs.split("\n"): + m = rpath_re.match(line) + if m: + rpath = m.group(1) + if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir): + # The dynamic linker searches both these places anyway. There is no point in + # looking there again. 
+ messages["useless-rpaths"] = "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath) + +QAPATHTEST[dev-so] = "package_qa_check_dev" +def package_qa_check_dev(path, name, d, elf, messages): + """ + Check for ".so" library symlinks in non-dev packages + """ + + if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path): + messages["dev-so"] = "non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \ + (name, package_qa_clean_path(path,d)) + +QAPATHTEST[staticdev] = "package_qa_check_staticdev" +def package_qa_check_staticdev(path, name, d, elf, messages): + """ + Check for ".a" library in non-staticdev packages + There are a number of exceptions to this rule, -pic packages can contain + static libraries, the _nonshared.a belong with their -dev packages and + libgcc.a, libgcov.a will be skipped in their packages + """ + + if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"): + messages["staticdev"] = "non -staticdev package contains static .a library: %s path '%s'" % \ + (name, package_qa_clean_path(path,d)) + +def package_qa_check_libdir(d): + """ + Check for wrong library installation paths. For instance, catch + recipes installing /lib/bar.so when ${base_libdir}="lib32" or + installing in /usr/lib64 when ${libdir}="/usr/lib" + """ + import re + + pkgdest = d.getVar('PKGDEST', True) + base_libdir = d.getVar("base_libdir",True) + os.sep + libdir = d.getVar("libdir", True) + os.sep + exec_prefix = d.getVar("exec_prefix", True) + os.sep + + messages = [] + + lib_re = re.compile("^/lib.+\.so(\..+)?$") + exec_re = re.compile("^%s.*/lib.+\.so(\..+)?$" % exec_prefix) + + for root, dirs, files in os.walk(pkgdest): + if root == pkgdest: + # Skip subdirectories for any packages with libdir in INSANE_SKIP + skippackages = [] + for package in dirs: + if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split(): + bb.note("Package %s skipping libdir QA test" % (package)) + skippackages.append(package) + for package in skippackages: + dirs.remove(package) + for file in files: + full_path = os.path.join(root, file) + rel_path = os.path.relpath(full_path, pkgdest) + if os.sep in rel_path: + package, rel_path = rel_path.split(os.sep, 1) + rel_path = os.sep + rel_path + if lib_re.match(rel_path): + if base_libdir not in rel_path: + messages.append("%s: found library in wrong location: %s" % (package, rel_path)) + if exec_re.match(rel_path): + if libdir not in rel_path: + messages.append("%s: found library in wrong location: %s" % (package, rel_path)) + + if messages: + package_qa_handle_error("libdir", "\n".join(messages), d) + +QAPATHTEST[debug-files] = "package_qa_check_dbg" +def package_qa_check_dbg(path, name, d, elf, messages): + """ + Check for ".debug" files or directories outside of the dbg package + """ + + if not "-dbg" in name and not "-ptest" in name: + if '.debug' in path.split(os.path.sep): + messages["debug-files"] = "non debug package contains .debug directory: %s path %s" % \ + (name, package_qa_clean_path(path,d)) + +QAPATHTEST[perms] = "package_qa_check_perm" +def package_qa_check_perm(path,name,d, elf, messages): + """ + Check the permission of files + """ + return + +QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries" +def 
package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages): + """ + Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix + """ + if unsafe_references_skippable(path, name, d): + return + + if elf: + import subprocess as sub + pn = d.getVar('PN', True) + + exec_prefix = d.getVar('exec_prefix', True) + sysroot_path = d.getVar('STAGING_DIR_TARGET', True) + sysroot_path_usr = sysroot_path + exec_prefix + + try: + ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read() + except bb.process.CmdError: + error_msg = pn + ": prelink-rtld aborted when processing %s" % path + package_qa_handle_error("unsafe-references-in-binaries", error_msg, d) + return False + + if sysroot_path_usr in ldd_output: + ldd_output = ldd_output.replace(sysroot_path, "") + + pkgdest = d.getVar('PKGDEST', True) + packages = d.getVar('PACKAGES', True) + + for package in packages.split(): + short_path = path.replace('%s/%s' % (pkgdest, package), "", 1) + if (short_path != path): + break + + base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix) + for line in ldd_output.split('\n'): + if exec_prefix in line: + error_msg = "%s: %s" % (base_err, line.strip()) + package_qa_handle_error("unsafe-references-in-binaries", error_msg, d) + + return False + +QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts" +def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages): + """ + Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix + """ + if unsafe_references_skippable(path, name, d): + return + + if not elf: + import stat + import subprocess + pn = d.getVar('PN', True) + + # Ensure we're checking an executable script + statinfo = os.stat(path) + if bool(statinfo.st_mode & stat.S_IXUSR): + # grep shell scripts for possible references to /exec_prefix/ + exec_prefix = d.getVar('exec_prefix', True) + statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path) + if subprocess.call(statement, shell=True) == 0: + error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path) + package_qa_handle_error("unsafe-references-in-scripts", error_msg, d) + error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix" + package_qa_handle_error("unsafe-references-in-scripts", error_msg, d) + +def unsafe_references_skippable(path, name, d): + if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d): + return True + + if "-dbg" in name or "-dev" in name: + return True + + # Other package names to skip: + if name.startswith("kernel-module-"): + return True + + # Skip symlinks + if os.path.islink(path): + return True + + # Skip unusual rootfs layouts which make these tests irrelevant + exec_prefix = d.getVar('exec_prefix', True) + if exec_prefix == "": + return True + + pkgdest = d.getVar('PKGDEST', True) + pkgdest = pkgdest + "/" + name + pkgdest = os.path.abspath(pkgdest) + base_bindir = pkgdest + d.getVar('base_bindir', True) + base_sbindir = pkgdest + d.getVar('base_sbindir', True) + base_libdir = pkgdest + d.getVar('base_libdir', True) + bindir = pkgdest + d.getVar('bindir', True) + sbindir = pkgdest + d.getVar('sbindir', True) + libdir = pkgdest + d.getVar('libdir', True) + + if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir: + return True + + # Skip files not in 
base_[bindir|sbindir|libdir] + path = os.path.abspath(path) + if not (base_bindir in path or base_sbindir in path or base_libdir in path): + return True + + return False + +QAPATHTEST[arch] = "package_qa_check_arch" +def package_qa_check_arch(path,name,d, elf, messages): + """ + Check if archs are compatible + """ + if not elf: + return + + target_os = d.getVar('TARGET_OS', True) + target_arch = d.getVar('TARGET_ARCH', True) + provides = d.getVar('PROVIDES', True) + bpn = d.getVar('BPN', True) + + # FIXME: Cross package confuse this check, so just skip them + for s in ['cross', 'nativesdk', 'cross-canadian']: + if bb.data.inherits_class(s, d): + return + + # avoid following links to /usr/bin (e.g. on udev builds) + # we will check the files pointed to anyway... + if os.path.islink(path): + return + + #if this will throw an exception, then fix the dict above + (machine, osabi, abiversion, littleendian, bits) \ + = package_qa_get_machine_dict()[target_os][target_arch] + + # Check the architecture and endiannes of the binary + if not ((machine == elf.machine()) or \ + ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))): + messages["arch"] = "Architecture did not match (%d to %d) on %s" % \ + (machine, elf.machine(), package_qa_clean_path(path,d)) + elif not ((bits == elf.abiSize()) or \ + ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))): + messages["arch"] = "Bit size did not match (%d to %d) %s on %s" % \ + (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)) + elif not littleendian == elf.isLittleEndian(): + messages["arch"] = "Endiannes did not match (%d to %d) on %s" % \ + (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)) + +QAPATHTEST[desktop] = "package_qa_check_desktop" +def package_qa_check_desktop(path, name, d, elf, messages): + """ + Run all desktop files through desktop-file-validate. + """ + if path.endswith(".desktop"): + desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate') + output = os.popen("%s %s" % (desktop_file_validate, path)) + # This only produces output on errors + for l in output: + messages["desktop"] = "Desktop file issue: " + l.strip() + +QAPATHTEST[textrel] = "package_qa_textrel" +def package_qa_textrel(path, name, d, elf, messages): + """ + Check if the binary contains relocations in .text + """ + + if not elf: + return + + if os.path.islink(path): + return + + phdrs = elf.run_objdump("-p", d) + sane = True + + import re + textrel_re = re.compile("\s+TEXTREL\s+") + for line in phdrs.split("\n"): + if textrel_re.match(line): + sane = False + + if not sane: + messages["textrel"] = "ELF binary '%s' has relocations in .text" % path + +QAPATHTEST[ldflags] = "package_qa_hash_style" +def package_qa_hash_style(path, name, d, elf, messages): + """ + Check if the binary has the right hash style... + """ + + if not elf: + return + + if os.path.islink(path): + return + + gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True) + if not gnu_hash: + gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True) + if not gnu_hash: + return + + sane = False + has_syms = False + + phdrs = elf.run_objdump("-p", d) + + # If this binary has symbols, we expect it to have GNU_HASH too. 
+ for line in phdrs.split("\n"): + if "SYMTAB" in line: + has_syms = True + if "GNU_HASH" in line: + sane = True + if "[mips32]" in line or "[mips64]" in line: + sane = True + + if has_syms and not sane: + messages["ldflags"] = "No GNU_HASH in the elf binary: '%s'" % path + + +QAPATHTEST[buildpaths] = "package_qa_check_buildpaths" +def package_qa_check_buildpaths(path, name, d, elf, messages): + """ + Check for build paths inside target files and error if not found in the whitelist + """ + # Ignore .debug files, not interesting + if path.find(".debug") != -1: + return + + # Ignore symlinks + if os.path.islink(path): + return + + tmpdir = d.getVar('TMPDIR', True) + with open(path) as f: + file_content = f.read() + if tmpdir in file_content: + messages["buildpaths"] = "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d) + + +QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi" +def package_qa_check_xorg_driver_abi(path, name, d, elf, messages): + """ + Check that all packages containing Xorg drivers have ABI dependencies + """ + + # Skip dev, dbg or nativesdk packages + if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"): + return + + driverdir = d.expand("${libdir}/xorg/modules/drivers/") + if driverdir in path and path.endswith(".so"): + mlprefix = d.getVar('MLPREFIX', True) or '' + for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""): + if rdep.startswith("%sxorg-abi-" % mlprefix): + return + messages["xorg-driver-abi"] = "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)) + +QAPATHTEST[infodir] = "package_qa_check_infodir" +def package_qa_check_infodir(path, name, d, elf, messages): + """ + Check that /usr/share/info/dir isn't shipped in a particular package + """ + infodir = d.expand("${infodir}/dir") + + if infodir in path: + messages["infodir"] = "The /usr/share/info/dir file is not meant to be shipped in a particular package." + +QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot" +def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages): + """ + Check that the package doesn't contain any absolute symlinks to the sysroot. 
+ """ + if os.path.islink(path): + target = os.readlink(path) + if os.path.isabs(target): + tmpdir = d.getVar('TMPDIR', True) + if target.startswith(tmpdir): + trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "") + messages["symlink-to-sysroot"] = "Symlink %s in %s points to TMPDIR" % (trimmed, name) + +def package_qa_check_license(workdir, d): + """ + Check for changes in the license files + """ + import tempfile + sane = True + + lic_files = d.getVar('LIC_FILES_CHKSUM', True) + lic = d.getVar('LICENSE', True) + pn = d.getVar('PN', True) + + if lic == "CLOSED": + return True + + if not lic_files: + bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)") + return False + + srcdir = d.getVar('S', True) + + for url in lic_files.split(): + try: + (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url) + except bb.fetch.MalformedUrl: + raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url) + srclicfile = os.path.join(srcdir, path) + if not os.path.isfile(srclicfile): + raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile) + + recipemd5 = parm.get('md5', '') + beginline, endline = 0, 0 + if 'beginline' in parm: + beginline = int(parm['beginline']) + if 'endline' in parm: + endline = int(parm['endline']) + + if (not beginline) and (not endline): + md5chksum = bb.utils.md5_file(srclicfile) + else: + fi = open(srclicfile, 'rb') + fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False) + tmplicfile = fo.name; + lineno = 0 + linesout = 0 + for line in fi: + lineno += 1 + if (lineno >= beginline): + if ((lineno <= endline) or not endline): + fo.write(line) + linesout += 1 + else: + break + fo.flush() + fo.close() + fi.close() + md5chksum = bb.utils.md5_file(tmplicfile) + os.unlink(tmplicfile) + + if recipemd5 == md5chksum: + bb.note (pn + ": md5 checksum matched for ", url) + else: + if recipemd5: + bb.error(pn + ": md5 data is not matching for ", url) + bb.error(pn + ": The new md5 checksum is ", md5chksum) + if beginline: + if endline: + srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline) + else: + srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline) + elif endline: + srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline) + else: + srcfiledesc = srclicfile + bb.error(pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)) + else: + bb.error(pn + ": md5 checksum is not specified for ", url) + bb.error(pn + ": The md5 checksum is ", md5chksum) + sane = False + + return sane + +def package_qa_check_staged(path,d): + """ + Check staged la and pc files for sanity + -e.g. 
installed being false + + As this is run after every stage we should be able + to find the one responsible for the errors easily even + if we look at every .pc and .la file + """ + + sane = True + tmpdir = d.getVar('TMPDIR', True) + workdir = os.path.join(tmpdir, "work") + + installed = "installed=yes" + if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d): + pkgconfigcheck = workdir + else: + pkgconfigcheck = tmpdir + + # find all .la and .pc files + # read the content + # and check for stuff that looks wrong + for root, dirs, files in os.walk(path): + for file in files: + path = os.path.join(root,file) + if file.endswith(".la"): + with open(path) as f: + file_content = f.read() + if workdir in file_content: + error_msg = "%s failed sanity test (workdir) in path %s" % (file,root) + sane = package_qa_handle_error("la", error_msg, d) + elif file.endswith(".pc"): + with open(path) as f: + file_content = f.read() + if pkgconfigcheck in file_content: + error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root) + sane = package_qa_handle_error("pkgconfig", error_msg, d) + + return sane + +# Walk over all files in a directory and call func +def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d): + import oe.qa + + #if this will throw an exception, then fix the dict above + target_os = d.getVar('TARGET_OS', True) + target_arch = d.getVar('TARGET_ARCH', True) + + warnings = {} + errors = {} + for path in pkgfiles[package]: + elf = oe.qa.ELFFile(path) + try: + elf.open() + except: + elf = None + for func in warnfuncs: + func(path, package, d, elf, warnings) + for func in errorfuncs: + func(path, package, d, elf, errors) + + for w in warnings: + package_qa_handle_error(w, warnings[w], d) + for e in errors: + package_qa_handle_error(e, errors[e], d) + + return len(errors) == 0 + +def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d): + # Don't do this check for kernel/module recipes, there aren't too many debug/development + # packages and you can get false positives e.g. on kernel-module-lirc-dev + if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d): + return True + + sane = True + if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg: + localdata = bb.data.createCopy(d) + localdata.setVar('OVERRIDES', pkg) + bb.data.update_data(localdata) + + # Now check the RDEPENDS + rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "") + + # Now do the sanity check!!! 
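+ # For example (illustrative): a non -dbg package declaring + # RDEPENDS_${PN} += "foo-dbg" would be flagged as debug-deps, and a non + # -dev/-staticdev package that rdepends on "foo-dev" would be flagged as dev-deps.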
+ for rdepend in rdepends: + if "-dbg" in rdepend and "debug-deps" not in skip: + error_msg = "%s rdepends on %s" % (pkg,rdepend) + sane = package_qa_handle_error("debug-deps", error_msg, d) + if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip: + error_msg = "%s rdepends on %s" % (pkg, rdepend) + sane = package_qa_handle_error("dev-deps", error_msg, d) + if rdepend not in packages: + rdep_data = oe.packagedata.read_subpkgdata(rdepend, d) + if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps: + continue + if not rdep_data or not 'PN' in rdep_data: + pkgdata_dir = d.getVar("PKGDATA_DIR", True) + try: + possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend)) + except OSError: + possibles = [] + for p in possibles: + rdep_data = oe.packagedata.read_subpkgdata(p, d) + if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps: + break + if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps: + continue + error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend) + sane = package_qa_handle_error("build-deps", error_msg, d) + + if "file-rdeps" not in skip: + ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)']) + if bb.data.inherits_class('nativesdk', d): + ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl']) + # For Saving the FILERDEPENDS + filerdepends = set() + rdep_data = oe.packagedata.read_subpkgdata(pkg, d) + for key in rdep_data: + if key.startswith("FILERDEPENDS_"): + for subkey in rdep_data[key].split(): + filerdepends.add(subkey) + filerdepends -= ignored_file_rdeps + + if filerdepends: + next = rdepends + done = rdepends[:] + # Find all the rdepends on the dependency chain + while next: + new = [] + for rdep in next: + rdep_data = oe.packagedata.read_subpkgdata(rdep, d) + sub_rdeps = rdep_data.get("RDEPENDS_" + rdep) + if not sub_rdeps: + continue + for sub_rdep in sub_rdeps.split(): + if sub_rdep in done: + continue + if not sub_rdep.startswith('(') and \ + oe.packagedata.has_subpkgdata(sub_rdep, d): + # It's a new rdep + done.append(sub_rdep) + new.append(sub_rdep) + next = new + + # Add the rprovides of itself + if pkg not in done: + done.insert(0, pkg) + + # The python is not a package, but python-core provides it, so + # skip checking /usr/bin/python if python is in the rdeps, in + # case there is a RDEPENDS_pkg = "python" in the recipe. 
+ for py in [ d.getVar('MLPREFIX', True) + "python", "python" ]: + if py in done: + filerdepends.discard("/usr/bin/python") + done.remove(py) + for rdep in done: + # For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO + rdep_rprovides = set() + rdep_data = oe.packagedata.read_subpkgdata(rdep, d) + for key in rdep_data: + if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"): + for subkey in rdep_data[key].split(): + rdep_rprovides.add(subkey) + # Add the files list to the rprovides + if key == "FILES_INFO": + # Use eval() to make it as a dict + for subkey in eval(rdep_data[key]): + rdep_rprovides.add(subkey) + filerdepends -= rdep_rprovides + if not filerdepends: + # Break if all the file rdepends are met + break + else: + # Clear it for the next loop + rdep_rprovides.clear() + if filerdepends: + error_msg = "%s requires %s, but no providers in its RDEPENDS" % \ + (pkg, ', '.join(str(e) for e in filerdepends)) + sane = package_qa_handle_error("file-rdeps", error_msg, d) + + return sane + +def package_qa_check_deps(pkg, pkgdest, skip, d): + sane = True + + localdata = bb.data.createCopy(d) + localdata.setVar('OVERRIDES', pkg) + bb.data.update_data(localdata) + + def check_valid_deps(var): + sane = True + try: + rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "") + except ValueError as e: + bb.fatal("%s_%s: %s" % (var, pkg, e)) + for dep in rvar: + for v in rvar[dep]: + if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')): + error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v) + sane = package_qa_handle_error("dep-cmp", error_msg, d) + return sane + + sane = True + if not check_valid_deps('RDEPENDS'): + sane = False + if not check_valid_deps('RRECOMMENDS'): + sane = False + if not check_valid_deps('RSUGGESTS'): + sane = False + if not check_valid_deps('RPROVIDES'): + sane = False + if not check_valid_deps('RREPLACES'): + sane = False + if not check_valid_deps('RCONFLICTS'): + sane = False + + return sane + +# The PACKAGE FUNC to scan each package +python do_package_qa () { + import subprocess + import oe.packagedata + + bb.note("DO PACKAGE QA") + + bb.build.exec_func("read_subpackage_metadata", d) + + logdir = d.getVar('T', True) + pkg = d.getVar('PN', True) + + # Check the compile log for host contamination + compilelog = os.path.join(logdir,"log.do_compile") + + if os.path.exists(compilelog): + statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog + if subprocess.call(statement, shell=True) == 0: + msg = "%s: The compile log indicates that host include and/or library paths were used.\n \ + Please check the log '%s' for more information." % (pkg, compilelog) + package_qa_handle_error("compile-host-path", msg, d) + + # Check the install log for host contamination + installlog = os.path.join(logdir,"log.do_install") + + if os.path.exists(installlog): + statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog + if subprocess.call(statement, shell=True) == 0: + msg = "%s: The install log indicates that host include and/or library paths were used.\n \ + Please check the log '%s' for more information." % (pkg, installlog) + package_qa_handle_error("install-host-path", msg, d) + + # Scan the packages... 
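+ # Recipes can opt individual packages out of specific tests via INSANE_SKIP, + # e.g. (illustrative): INSANE_SKIP_${PN} += "dev-so"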
+ pkgdest = d.getVar('PKGDEST', True) + packages = d.getVar('PACKAGES', True) + + cpath = oe.cachedpath.CachedPath() + global pkgfiles + pkgfiles = {} + for pkg in (packages or "").split(): + pkgfiles[pkg] = [] + for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg): + for file in files: + pkgfiles[pkg].append(walkroot + os.sep + file) + + # no packages should be scanned + if not packages: + return + + testmatrix = d.getVarFlags("QAPATHTEST") + import re + # The package name matches the [a-z0-9.+-]+ regular expression + pkgname_pattern = re.compile("^[a-z0-9.+-]+$") + + taskdepdata = d.getVar("BB_TASKDEPDATA", False) + taskdeps = set() + for dep in taskdepdata: + taskdeps.add(taskdepdata[dep][0]) + + g = globals() + walk_sane = True + rdepends_sane = True + deps_sane = True + for package in packages.split(): + skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split() + if skip: + bb.note("Package %s skipping QA tests: %s" % (package, str(skip))) + warnchecks = [] + for w in (d.getVar("WARN_QA", True) or "").split(): + if w in skip: + continue + if w in testmatrix and testmatrix[w] in g: + warnchecks.append(g[testmatrix[w]]) + errorchecks = [] + for e in (d.getVar("ERROR_QA", True) or "").split(): + if e in skip: + continue + if e in testmatrix and testmatrix[e] in g: + errorchecks.append(g[testmatrix[e]]) + + bb.note("Checking Package: %s" % package) + # Check package name + if not pkgname_pattern.match(package): + package_qa_handle_error("pkgname", + "%s doesn't match the [a-z0-9.+-]+ regex\n" % package, d) + + path = "%s/%s" % (pkgdest, package) + if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d): + walk_sane = False + if not package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d): + rdepends_sane = False + if not package_qa_check_deps(package, pkgdest, skip, d): + deps_sane = False + + + if 'libdir' in d.getVar("ALL_QA", True).split(): + package_qa_check_libdir(d) + + qa_sane = d.getVar("QA_SANE", True) + if not walk_sane or not rdepends_sane or not deps_sane or not qa_sane: + bb.fatal("QA run found fatal errors. Please consider fixing them.") + bb.note("DONE with PACKAGE QA") +} + +do_package_qa[rdeptask] = "do_packagedata" +addtask do_package_qa after do_packagedata do_package before do_build + +SSTATETASKS += "do_package_qa" +do_package_qa[sstate-inputdirs] = "" +do_package_qa[sstate-outputdirs] = "" +python do_package_qa_setscene () { + sstate_setscene(d) +} +addtask do_package_qa_setscene + +python do_qa_staging() { + bb.note("QA checking staging") + + if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${STAGING_LIBDIR}'), d): + bb.fatal("QA staging was broken by the package built above") +} + +python do_qa_configure() { + import subprocess + + ########################################################################### + # Check config.log for cross compile issues + ########################################################################### + + configs = [] + workdir = d.getVar('WORKDIR', True) + bb.note("Checking autotools environment for common misconfiguration") + for root, dirs, files in os.walk(workdir): + statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \ + os.path.join(root,"config.log") + if "config.log" in files: + if subprocess.call(statement, shell=True) == 0: + bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities. +Rerun configure task after fixing this. 
The path was '%s'""" % root) + + if "configure.ac" in files: + configs.append(os.path.join(root,"configure.ac")) + if "configure.in" in files: + configs.append(os.path.join(root, "configure.in")) + + ########################################################################### + # Check gettext configuration and dependencies are correct + ########################################################################### + + cnf = d.getVar('EXTRA_OECONF', True) or "" + if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf: + ml = d.getVar("MLPREFIX", True) or "" + if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d): + gt = "gettext-native" + elif bb.data.inherits_class('cross-canadian', d): + gt = "nativesdk-gettext" + else: + gt = "virtual/" + ml + "gettext" + deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "") + if gt not in deps: + for config in configs: + gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config + if subprocess.call(gnu, shell=True) == 0: + bb.fatal("""%s required but not in DEPENDS for file %s. +Missing inherit gettext?""" % (gt, config)) + + ########################################################################### + # Check license variables + ########################################################################### + + if not package_qa_check_license(workdir, d): + bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix") + + ########################################################################### + # Check unrecognised configure options (with a white list) + ########################################################################### + if bb.data.inherits_class("autotools", d): + bb.note("Checking configure output for unrecognised options") + try: + flag = "WARNING: unrecognized options:" + log = os.path.join(d.getVar('B', True), 'config.log') + output = subprocess.check_output(['grep', '-F', flag, log]).replace(', ', ' ') + options = set() + for line in output.splitlines(): + options |= set(line.partition(flag)[2].split()) + whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST", True).split()) + options -= whitelist + if options: + pn = d.getVar('PN', True) + error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options) + package_qa_handle_error("unknown-configure-option", error_msg, d) + except subprocess.CalledProcessError: + pass +} +# The Staging Func, to check all staging +#addtask qa_staging after do_populate_sysroot before do_build +do_populate_sysroot[postfuncs] += "do_qa_staging " + +# Check broken config.log files, for packages requiring Gettext which don't +# have it in DEPENDS and for correct LIC_FILES_CHKSUM +#addtask qa_configure after do_configure before do_compile +do_configure[postfuncs] += "do_qa_configure " + +python () { + tests = d.getVar('ALL_QA', True).split() + if "desktop" in tests: + d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native") + + ########################################################################### + # Check various variables + ########################################################################### + + # Checking ${FILESEXTRAPATHS} + extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "") + if '__default' not in extrapaths.split(":"): + msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n" + msg += "type of assignment, and don't forget the colon.\n" + msg += "Please assign 
it with the format of:\n" + msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n" + msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n" + msg += "in your bbappend file\n\n" + msg += "Your incorrect assignment is:\n" + msg += "%s\n" % extrapaths + bb.warn(msg) + + if d.getVar('do_stage', True) is not None: + bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True)) + + overrides = d.getVar('OVERRIDES', True).split(':') + pn = d.getVar('PN', True) + if pn in overrides: + msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn) + package_qa_handle_error("pn-overrides", msg, d) + + issues = [] + if (d.getVar('PACKAGES', True) or "").split(): + for dep in (d.getVar('QADEPENDS', True) or "").split(): + d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep) + for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY': + if d.getVar(var): + issues.append(var) + else: + d.setVarFlag('do_package_qa', 'rdeptask', '') + for i in issues: + package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d) +} diff --git a/meta/classes/insserv.bbclass b/meta/classes/insserv.bbclass new file mode 100644 index 0000000000..14290a77e2 --- /dev/null +++ b/meta/classes/insserv.bbclass @@ -0,0 +1,5 @@ +do_rootfs[depends] += "insserv-native:do_populate_sysroot" +run_insserv () { + insserv -p ${IMAGE_ROOTFS}/etc/init.d -c ${STAGING_ETCDIR_NATIVE}/insserv.conf +} +ROOTFS_POSTPROCESS_COMMAND += " run_insserv ; " diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass new file mode 100644 index 0000000000..bbcfa15b84 --- /dev/null +++ b/meta/classes/kernel-arch.bbclass @@ -0,0 +1,60 @@ +# +# set the ARCH environment variable for kernel compilation (including +# modules). 
return value must match one of the architecture directories +# in the kernel source "arch" directory +# + +valid_archs = "alpha cris ia64 \ + i386 x86 \ + m68knommu m68k ppc powerpc powerpc64 ppc64 \ + sparc sparc64 \ + arm aarch64 \ + m32r mips \ + sh sh64 um h8300 \ + parisc s390 v850 \ + avr32 blackfin \ + microblaze" + +def map_kernel_arch(a, d): + import re + + valid_archs = d.getVar('valid_archs', True).split() + + if re.match('(i.86|athlon|x86.64)$', a): return 'x86' + elif re.match('armeb$', a): return 'arm' + elif re.match('aarch64$', a): return 'arm64' + elif re.match('aarch64_be$', a): return 'arm64' + elif re.match('mips(el|64|64el)$', a): return 'mips' + elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc' + elif re.match('sh(3|4)$', a): return 'sh' + elif re.match('bfin', a): return 'blackfin' + elif re.match('microblazeel', a): return 'microblaze' + elif a in valid_archs: return a + else: + bb.error("cannot map '%s' to a linux kernel architecture" % a) + +export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}" + +def map_uboot_arch(a, d): + import re + + if re.match('p(pc|owerpc)(|64)', a): return 'ppc' + elif re.match('i.86$', a): return 'x86' + elif re.match('arm64$', a): return 'arm' + return a + +export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}" + +# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture +# specific options necessary for building the kernel and modules. +TARGET_CC_KERNEL_ARCH ?= "" +HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}" +TARGET_LD_KERNEL_ARCH ?= "" +HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}" +TARGET_AR_KERNEL_ARCH ?= "" +HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}" + +KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH}" +KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}" +KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}" + diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass new file mode 100644 index 0000000000..a63f482a91 --- /dev/null +++ b/meta/classes/kernel-grub.bbclass @@ -0,0 +1,91 @@ +# +# While installing a rpm to update kernel on a deployed target, it will update +# the boot area and the boot menu with the kernel as the priority but allow +# you to fall back to the original kernel as well. +# +# - In kernel-image's preinstall scriptlet, it backs up original kernel to avoid +# probable confliction with the new one. +# +# - In kernel-image's postinstall scriptlet, it modifies grub's config file to +# updates the new kernel as the boot priority. +# + +pkg_preinst_kernel-image_append () { + # Parsing confliction + [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list" + [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg" + if [ -n "$grubcfg" ]; then + # Dereference symlink to avoid confliction with new kernel name. + if grep -q "/${KERNEL_IMAGETYPE} \+root=" $grubcfg; then + if [ -L "$D/boot/${KERNEL_IMAGETYPE}" ]; then + kimage=`realpath $D/boot/${KERNEL_IMAGETYPE} 2>/dev/null` + if [ -f "$D$kimage" ]; then + sed -i "s:${KERNEL_IMAGETYPE} \+root=:${kimage##*/} root=:" $grubcfg + fi + fi + fi + + # Rename old kernel if it conflicts with new kernel name. 
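+ # e.g. an existing /boot/bzImage-3.14.0 would be moved aside as + # /boot/bzImage-3.14.0-<timestamp>-back (illustrative names).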
+ if grep -q "/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=" $grubcfg; then + if [ -f "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" ]; then + timestamp=`date +%s` + kimage="$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-$timestamp-back" + sed -i "s:${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg + mv "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" "$kimage" + fi + fi + fi +} + +pkg_postinst_kernel-image_prepend () { + get_new_grub_cfg() { + grubcfg="$1" + old_image="$2" + title="Update ${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-${PV}" + if [ "${grubcfg##*/}" = "grub.cfg" ]; then + rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \ + sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"` + + echo "menuentry \"$title\" {" + echo " set root=(hd0,1)" + echo "$rootfs" + echo "}" + elif [ "${grubcfg##*/}" = "menu.list" ]; then + rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \ + sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"` + + echo "default 0" + echo "timeout 30" + echo "title $title" + echo "root (hd0,0)" + echo "$rootfs" + fi + } + + get_old_grub_cfg() { + grubcfg="$1" + if [ "${grubcfg##*/}" = "grub.cfg" ]; then + cat "$grubcfg" + elif [ "${grubcfg##*/}" = "menu.list" ]; then + sed -e '/^default/d' -e '/^timeout/d' "$grubcfg" + fi + } + + if [ -f "$D/boot/grub/grub.cfg" ]; then + grubcfg="$D/boot/grub/grub.cfg" + old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'` + elif [ -f "$D/boot/grub/menu.list" ]; then + grubcfg="$D/boot/grub/menu.list" + old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'` + fi + + # Don't update grubcfg at first install while old bzImage doesn't exist. + if [ -f "$D/boot/${old_image##*/}" ]; then + grubcfgtmp="$grubcfg.tmp" + get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp + get_old_grub_cfg "$grubcfg" >> $grubcfgtmp + mv $grubcfgtmp $grubcfg + echo "Caution! Update kernel may affect kernel-module!" 
+ fi +} + diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass new file mode 100644 index 0000000000..9a95b72744 --- /dev/null +++ b/meta/classes/kernel-module-split.bbclass @@ -0,0 +1,200 @@ +pkg_postinst_modules () { +if [ -z "$D" ]; then + depmod -a ${KERNEL_VERSION} +else + # image.bbclass will call depmodwrapper after everything is installed, + # no need to do it here as well + : +fi +} + +pkg_postrm_modules () { +if [ -z "$D" ]; then + depmod -a ${KERNEL_VERSION} +else + depmodwrapper -a -b $D ${KERNEL_VERSION} +fi +} + +autoload_postinst_fragment() { +if [ x"$D" = "x" ]; then + modprobe %s || true +fi +} + +do_install_append() { + install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/ +} + +PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages " + +KERNEL_MODULES_META_PACKAGE ?= "kernel-modules" + +python split_kernel_module_packages () { + import re + + modinfoexp = re.compile("([^=]+)=(.*)") + kerverrexp = re.compile('^(.*-hh.*)[\.\+].*$') + depmodpat0 = re.compile("^(.*\.k?o):..*$") + depmodpat1 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*$") + depmodpat2 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*\\\$") + depmodpat3 = re.compile("^\t(.*\.k?o)\s*\\\$") + depmodpat4 = re.compile("^\t(.*\.k?o)\s*$") + + def extract_modinfo(file): + import tempfile, subprocess + tempfile.tempdir = d.getVar("WORKDIR", True) + tf = tempfile.mkstemp() + tmpfile = tf[1] + cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile) + subprocess.call(cmd, shell=True) + f = open(tmpfile) + l = f.read().split("\000") + f.close() + os.close(tf[0]) + os.unlink(tmpfile) + vals = {} + for i in l: + m = modinfoexp.match(i) + if not m: + continue + vals[m.group(1)] = m.group(2) + return vals + + def parse_depmod(): + + dvar = d.getVar('PKGD', True) + + kernelver = d.getVar('KERNEL_VERSION', True) + kernelver_stripped = kernelver + m = kerverrexp.match(kernelver) + if m: + kernelver_stripped = m.group(1) + staging_kernel_dir = d.getVar("STAGING_KERNEL_DIR", True) + system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver) + if not os.path.exists(system_map_file): + system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver) + if not os.path.exists(system_map_file): + bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_DIR '%s'" % (kernelver, dvar, staging_kernel_dir)) + + cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped) + f = os.popen(cmd, 'r') + + deps = {} + line = f.readline() + while line: + if not depmodpat0.match(line): + line = f.readline() + continue + m1 = depmodpat1.match(line) + if m1: + deps[m1.group(1)] = m1.group(2).split() + else: + m2 = depmodpat2.match(line) + if m2: + deps[m2.group(1)] = m2.group(2).split() + line = f.readline() + m3 = depmodpat3.match(line) + while m3: + deps[m2.group(1)].extend(m3.group(1).split()) + line = f.readline() + m3 = depmodpat3.match(line) + m4 = depmodpat4.match(line) + deps[m2.group(1)].extend(m4.group(1).split()) + line = f.readline() + f.close() + return deps + + def get_dependencies(file, pattern, format): + # file no longer includes PKGD + file = file.replace(d.getVar('PKGD', True) or '', '', 1) + # instead is prefixed with /lib/modules/${KERNEL_VERSION} + file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', True) or '', '', 1) + + if file in module_deps: + dependencies = [] + for i in module_deps[file]: + m = re.match(pattern, os.path.basename(i)) + if not m: + continue + on = 
legitimize_package_name(m.group(1)) + dependency_pkg = format % on + dependencies.append(dependency_pkg) + return dependencies + return [] + + def frob_metadata(file, pkg, pattern, format, basename): + vals = extract_modinfo(file) + + dvar = d.getVar('PKGD', True) + + # If autoloading is requested, output /etc/modules-load.d/.conf and append + # appropriate modprobe commands to the postinst + autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD", True) or "").split() + autoload = d.getVar('module_autoload_%s' % basename, True) + if autoload and autoload == basename: + bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename) + if autoload and basename not in autoloadlist: + bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename)) + if basename in autoloadlist: + name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename) + f = open(name, 'w') + if autoload: + for m in autoload.split(): + f.write('%s\n' % m) + else: + f.write('%s\n' % basename) + f.close() + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + bb.fatal("pkg_postinst_%s not defined" % pkg) + postinst += d.getVar('autoload_postinst_fragment', True) % autoload + d.setVar('pkg_postinst_%s' % pkg, postinst) + + # Write out any modconf fragment + modconflist = (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split() + modconf = d.getVar('module_conf_%s' % basename, True) + if modconf and basename in modconflist: + name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename) + f = open(name, 'w') + f.write("%s\n" % modconf) + f.close() + elif modconf: + bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename)) + + files = d.getVar('FILES_%s' % pkg, True) + files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename) + d.setVar('FILES_%s' % pkg, files) + + if "description" in vals: + old_desc = d.getVar('DESCRIPTION_' + pkg, True) or "" + d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"]) + + rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "") + for dep in get_dependencies(file, pattern, format): + if not dep in rdepends: + rdepends[dep] = [] + d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False)) + + module_deps = parse_depmod() + module_regex = '^(.*)\.k?o$' + module_pattern = 'kernel-module-%s' + + postinst = d.getVar('pkg_postinst_modules', True) + postrm = d.getVar('pkg_postrm_modules', True) + + modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True))) + if modules: + metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True) + d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules)) + + # If modules-load.d and modprobe.d are empty at this point, remove them to + # avoid warnings. removedirs only raises an OSError if an empty + # directory cannot be removed. 
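+ # (Illustrative usage of the variables handled above, typically set in a + # machine .conf or recipe: KERNEL_MODULE_AUTOLOAD += "g_serial" and + # module_conf_g_serial = "options g_serial use_acm=1".)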
+ dvar = d.getVar('PKGD', True) + for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]: + if len(os.listdir(dir)) == 0: + os.rmdir(dir) +} + +do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()))}' diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass new file mode 100644 index 0000000000..f42a5c2534 --- /dev/null +++ b/meta/classes/kernel-yocto.bbclass @@ -0,0 +1,361 @@ +S = "${WORKDIR}/linux" + +# remove tasks that modify the source tree in case externalsrc is inherited +SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch" + +# returns local (absolute) path names for all valid patches in the +# src_uri +def find_patches(d): + patches = src_patches(d) + patch_list=[] + for p in patches: + _, _, local, _, _, _ = bb.fetch.decodeurl(p) + patch_list.append(local) + + return patch_list + +# returns all the elements from the src uri that are .scc files +def find_sccs(d): + sources=src_patches(d, True) + sources_list=[] + for s in sources: + base, ext = os.path.splitext(os.path.basename(s)) + if ext and ext in [".scc", ".cfg"]: + sources_list.append(s) + elif base and base in 'defconfig': + sources_list.append(s) + + return sources_list + +# check the SRC_URI for "kmeta" type'd git repositories. Return the name of +# the repository as it will be found in WORKDIR +def find_kernel_feature_dirs(d): + feature_dirs=[] + fetch = bb.fetch2.Fetch([], d) + for url in fetch.urls: + urldata = fetch.ud[url] + parm = urldata.parm + if "type" in parm: + type = parm["type"] + if "destsuffix" in parm: + destdir = parm["destsuffix"] + if type == "kmeta": + feature_dirs.append(destdir) + + return feature_dirs + +# find the master/machine source branch. In the same way that the fetcher proceses +# git repositories in the SRC_URI we take the first repo found, first branch. +def get_machine_branch(d, default): + fetch = bb.fetch2.Fetch([], d) + for url in fetch.urls: + urldata = fetch.ud[url] + parm = urldata.parm + if "branch" in parm: + branches = urldata.parm.get("branch").split(',') + return branches[0] + + return default + +do_patch() { + cd ${S} + export KMETA=${KMETA} + + # if kernel tools are available in-tree, they are preferred + # and are placed on the path before any external tools. Unless + # the external tools flag is set, in that case we do nothing. + if [ -f "${S}/scripts/util/configme" ]; then + if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then + PATH=${S}/scripts/util:${PATH} + fi + fi + + machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" + machine_srcrev="${SRCREV_machine}" + if [ -z "${machine_srcrev}" ]; then + # fallback to SRCREV if a non machine_meta tree is being built + machine_srcrev="${SRCREV}" + fi + + # if we have a defined/set meta branch we should not be generating + # any meta data. The passed branch has what we need. + if [ -n "${KMETA}" ]; then + createme_flags="--disable-meta-gen --meta ${KMETA}" + fi + + createme ${createme_flags} ${ARCH} ${machine_branch} + if [ $? -ne 0 ]; then + bbfatal "Could not create ${machine_branch}" + fi + + sccs="${@" ".join(find_sccs(d))}" + patches="${@" ".join(find_patches(d))}" + feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}" + + set +e + # add any explicitly referenced features onto the end of the feature + # list that is passed to the kernel build scripts. 
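+ # (A recipe or BSP would request such features with something like + # KERNEL_FEATURES_append = " features/netfilter/netfilter.scc" -- an illustrative value.)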
+ if [ -n "${KERNEL_FEATURES}" ]; then + for feat in ${KERNEL_FEATURES}; do + addon_features="$addon_features --feature $feat" + done + fi + + # check for feature directories/repos/branches that were part of the + # SRC_URI. If they were supplied, we convert them into include directives + # for the update part of the process + if [ -n "${feat_dirs}" ]; then + for f in ${feat_dirs}; do + if [ -d "${WORKDIR}/$f/meta" ]; then + includes="$includes -I${WORKDIR}/$f/meta" + elif [ -d "${WORKDIR}/$f" ]; then + includes="$includes -I${WORKDIR}/$f" + fi + done + fi + + # updates or generates the target description + updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \ + ${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches} + if [ $? -ne 0 ]; then + bbfatal "Could not update ${machine_branch}" + fi + + # executes and modifies the source tree as required + patchme ${KMACHINE} + if [ $? -ne 0 ]; then + bberror "Could not apply patches for ${KMACHINE}." + bbfatal "Patch failures can be resolved in the devshell (bitbake -c devshell ${PN})" + fi + + # check to see if the specified SRCREV is reachable from the final branch. + # if it wasn't something wrong has happened, and we should error. + if [ "${machine_srcrev}" != "AUTOINC" ]; then + if ! [ "$(git rev-parse --verify ${machine_srcrev})" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then + bberror "SRCREV ${machine_srcrev} was specified, but is not reachable" + bbfatal "Check the BSP description for incorrect branch selection, or other errors." + fi + fi +} + +do_kernel_checkout() { + set +e + + # A linux yocto SRC_URI should use the bareclone option. That + # ensures that all the branches are available in the WORKDIR version + # of the repository. + source_dir=`echo ${S} | sed 's%/$%%'` + source_workdir="${WORKDIR}/git" + if [ -d "${WORKDIR}/git/" ] && [ -d "${WORKDIR}/git/.git" ]; then + # case2: the repository is a non-bare clone + + # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree. + if [ "${source_dir}" != "${source_workdir}" ]; then + rm -rf ${S} + mv ${WORKDIR}/git ${S} + fi + cd ${S} + elif [ -d "${WORKDIR}/git/" ] && [ ! -d "${WORKDIR}/git/.git" ]; then + # case2: the repository is a bare clone + + # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree. + if [ "${source_dir}" != "${source_workdir}" ]; then + rm -rf ${S} + mkdir -p ${S}/.git + mv ${WORKDIR}/git/* ${S}/.git + rm -rf ${WORKDIR}/git/ + fi + cd ${S} + git config core.bare false + else + # case 3: we have no git repository at all. + # To support low bandwidth options for building the kernel, we'll just + # convert the tree to a git repo and let the rest of the process work unchanged + + # if ${S} hasn't been set to the proper subdirectory a default of "linux" is + # used, but we can't initialize that empty directory. So check it and throw a + # clear error + + cd ${S} + if [ ! -f "Makefile" ]; then + bberror "S is not set to the linux source directory. Check " + bbfatal "the recipe and set S to the proper extracted subdirectory" + fi + git init + git add . + git commit -q -m "baseline commit: creating repo for ${PN}-${PV}" + fi + # end debare + + # convert any remote branches to local tracking ones + for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do + b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`; + git show-ref --quiet --verify -- "refs/heads/$b" + if [ $? 
-ne 0 ]; then + git branch $b $i > /dev/null + fi + done + + # If KMETA is defined, the branch must exist, but a machine branch + # can be missing since it may be created later by the tools. + if [ -n "${KMETA}" ]; then + git show-ref --quiet --verify -- "refs/heads/${KMETA}" + if [ $? -eq 1 ]; then + bberror "The branch '${KMETA}' is required and was not found" + bberror "Ensure that the SRC_URI points to a valid linux-yocto" + bbfatal "kernel repository" + fi + fi + + + # Create a working tree copy of the kernel by checking out a branch + machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" + git show-ref --quiet --verify -- "refs/heads/${machine_branch}" + if [ $? -eq 0 ]; then + machine_branch="master" + fi + + # checkout and clobber any unimportant files + git checkout -f ${machine_branch} +} +do_kernel_checkout[dirs] = "${S}" + +addtask kernel_checkout before do_patch after do_unpack + +do_kernel_configme[dirs] += "${S} ${B}" +do_kernel_configme() { + bbnote "kernel configme" + export KMETA=${KMETA} + + if [ -n "${KCONFIG_MODE}" ]; then + configmeflags=${KCONFIG_MODE} + else + # If a defconfig was passed, use =n as the baseline, which is achieved + # via --allnoconfig + if [ -f ${WORKDIR}/defconfig ]; then + configmeflags="--allnoconfig" + fi + fi + + cd ${S} + PATH=${PATH}:${S}/scripts/util + configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE} + if [ $? -ne 0 ]; then + bbfatal "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}" + fi + + echo "# Global settings from linux recipe" >> ${B}/.config + echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config +} + +addtask kernel_configme after do_patch + +python do_kernel_configcheck() { + import re, string, sys + + bb.plain("NOTE: validating kernel config, see log.do_kernel_configcheck for details") + + # if KMETA isn't set globally by a recipe using this routine, we need to + # set the default to 'meta'. Otherwise, kconf_check is not passed a valid + # meta-series for processing + kmeta = d.getVar( "KMETA", True ) or "meta" + if not os.path.exists(kmeta): + kmeta = "." + kmeta + + pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/") + cmd = d.expand("cd ${S}; kconf_check -config- %s/meta-series ${S} ${B}" % kmeta) + ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd)) + + config_check_visibility = d.getVar( "KCONF_AUDIT_LEVEL", True ) or 1 + if config_check_visibility == 1: + bb.debug( 1, "%s" % result ) + else: + bb.note( "%s" % result ) +} + +# Ensure that the branches (BSP and meta) are on the locations specified by +# their SRCREV values. If they are NOT on the right commits, the branches +# are corrected to the proper commit. +do_validate_branches() { + set +e + cd ${S} + export KMETA=${KMETA} + + machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}" + machine_srcrev="${SRCREV_machine}" + + # if SRCREV is AUTOREV it shows up as AUTOINC there's nothing to + # check and we can exit early + if [ "${machine_srcrev}" = "AUTOINC" ]; then + bbnote "SRCREV validation is not required for AUTOREV" + elif [ "${machine_srcrev}" = "" ] && [ "${SRCREV}" != "AUTOINC" ]; then + # SRCREV_machine_ was not set. This means that a custom recipe + # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. 
In + # this case, we need to reset to the give SRCREV before heading to patching + bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}" + force_srcrev="${SRCREV}" + else + git cat-file -t ${machine_srcrev} > /dev/null + if [ $? -ne 0 ]; then + bberror "${machine_srcrev} is not a valid commit ID." + bbfatal "The kernel source tree may be out of sync" + fi + force_srcrev=${machine_srcrev} + fi + + ## KMETA branch validation. + target_meta_head="${SRCREV_meta}" + if [ "${target_meta_head}" = "AUTOINC" ] || [ "${target_meta_head}" = "" ]; then + bbnote "SRCREV validation skipped for AUTOREV or empty meta branch" + else + meta_head=`git show-ref -s --heads ${KMETA}` + + git cat-file -t ${target_meta_head} > /dev/null + if [ $? -ne 0 ]; then + bberror "${target_meta_head} is not a valid commit ID" + bbfatal "The kernel source tree may be out of sync" + fi + if [ "$meta_head" != "$target_meta_head" ]; then + bbnote "Setting branch ${KMETA} to ${target_meta_head}" + git branch -m ${KMETA} ${KMETA}-orig + git checkout -q -b ${KMETA} ${target_meta_head} + if [ $? -ne 0 ];then + bbfatal "Could not checkout ${KMETA} branch from known hash ${target_meta_head}" + fi + fi + fi + + git checkout -q -f ${machine_branch} + if [ -n "${force_srcrev}" ]; then + # see if the branch we are about to patch has been properly reset to the defined + # SRCREV .. if not, we reset it. + branch_head=`git rev-parse HEAD` + if [ "${force_srcrev}" != "${branch_head}" ]; then + current_branch=`git rev-parse --abbrev-ref HEAD` + git branch "$current_branch-orig" + git reset --hard ${force_srcrev} + fi + fi +} + +# Many scripts want to look in arch/$arch/boot for the bootable +# image. This poses a problem for vmlinux based booting. This +# task arranges to have vmlinux appear in the normalized directory +# location. +do_kernel_link_vmlinux() { + if [ ! 
-d "${B}/arch/${ARCH}/boot" ]; then + mkdir ${B}/arch/${ARCH}/boot + fi + cd ${B}/arch/${ARCH}/boot + ln -sf ../../../vmlinux +} + +OE_TERMINAL_EXPORTS += "GUILT_BASE KBUILD_OUTPUT" +GUILT_BASE = "meta" +KBUILD_OUTPUT = "${B}" + +python () { + # If diffconfig is available, ensure it runs after kernel_configme + if 'do_diffconfig' in d: + bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d) +} diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass new file mode 100644 index 0000000000..2a6ec34c36 --- /dev/null +++ b/meta/classes/kernel.bbclass @@ -0,0 +1,505 @@ +inherit linux-kernel-base kernel-module-split + +PROVIDES += "virtual/kernel" +DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native" + +# we include gcc above, we dont need virtual/libc +INHIBIT_DEFAULT_DEPS = "1" + +KERNEL_IMAGETYPE ?= "zImage" +INITRAMFS_IMAGE ?= "" +INITRAMFS_TASK ?= "" +INITRAMFS_IMAGE_BUNDLE ?= "" + +python __anonymous () { + kerneltype = d.getVar('KERNEL_IMAGETYPE', True) + if kerneltype == 'uImage': + depends = d.getVar("DEPENDS", True) + depends = "%s u-boot-mkimage-native" % depends + d.setVar("DEPENDS", depends) + + image = d.getVar('INITRAMFS_IMAGE', True) + if image: + d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_rootfs') + + # NOTE: setting INITRAMFS_TASK is for backward compatibility + # The preferred method is to set INITRAMFS_IMAGE, because + # this INITRAMFS_TASK has circular dependency problems + # if the initramfs requires kernel modules + image_task = d.getVar('INITRAMFS_TASK', True) + if image_task: + d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}') +} + +inherit kernel-arch deploy + +PACKAGES_DYNAMIC += "^kernel-module-.*" +PACKAGES_DYNAMIC += "^kernel-image-.*" +PACKAGES_DYNAMIC += "^kernel-firmware-.*" + +export OS = "${TARGET_OS}" +export CROSS_COMPILE = "${TARGET_PREFIX}" + +KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \ + int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \ + int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}" + +KERNEL_RELEASE ?= "${KERNEL_VERSION}" + +# Where built kernel lies in the kernel tree +KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}" +KERNEL_IMAGEDEST = "boot" + +# +# configuration +# +export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}" + +KERNEL_VERSION = "${@get_kernelversion('${B}')}" + +KERNEL_LOCALVERSION ?= "" + +# kernels are generally machine specific +PACKAGE_ARCH = "${MACHINE_ARCH}" + +# U-Boot support +UBOOT_ENTRYPOINT ?= "20008000" +UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" + +# Some Linux kernel configurations need additional parameters on the command line +KERNEL_EXTRA_ARGS ?= "" + +# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE. +# We don't want to override kernel Makefile variables from the environment +EXTRA_OEMAKE = "" + +KERNEL_ALT_IMAGETYPE ??= "" + +# Define where the kernel headers are installed on the target as well as where +# they are staged. +KERNEL_SRC_PATH = "/usr/src/kernel" + +KERNEL_IMAGETYPE_FOR_MAKE = "${@(lambda s: s[:-3] if s[-3:] == ".gz" else s)(d.getVar('KERNEL_IMAGETYPE', True))}" + +copy_initramfs() { + echo "Copying initramfs into ./usr ..." 
+ # In case the directory is not created yet from the first pass compile: + mkdir -p ${B}/usr + # Find and use the first initramfs image archive type we find + rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio + for img in cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do + if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then + cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/. + case $img in + *gz) + echo "gzip decompressing image" + gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img + break + ;; + *lz4) + echo "lz4 decompressing image" + lz4 -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img + break + ;; + *lzo) + echo "lzo decompressing image" + lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img + break + ;; + *lzma) + echo "lzma decompressing image" + lzma -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img + break + ;; + *xz) + echo "xz decompressing image" + xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img + break + ;; + esac + fi + done + echo "Finished copy of initramfs into ./usr" +} + +INITRAMFS_BASE_NAME = "${KERNEL_IMAGETYPE}-initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}" +INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME" +do_bundle_initramfs () { + if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then + echo "Creating a kernel image with a bundled initramfs..." + copy_initramfs + if [ -e ${KERNEL_OUTPUT} ] ; then + mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.bak + fi + use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio + kernel_do_compile + mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.initramfs + mv -f ${KERNEL_OUTPUT}.bak ${KERNEL_OUTPUT} + # Update install area + echo "There is kernel image bundled with initramfs: ${B}/${KERNEL_OUTPUT}.initramfs" + install -m 0644 ${B}/${KERNEL_OUTPUT}.initramfs ${D}/boot/${KERNEL_IMAGETYPE}-initramfs-${MACHINE}.bin + echo "${B}/${KERNEL_OUTPUT}.initramfs" + fi +} + +python do_devshell_prepend () { + os.environ["LDFLAGS"] = '' +} + +addtask bundle_initramfs after do_install before do_deploy + +kernel_do_compile() { + unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE + # The $use_alternate_initrd is only set from + # do_bundle_initramfs() This variable is specifically for the + # case where we are making a second pass at the kernel + # compilation and we want to force the kernel build to use a + # different initramfs image. The way to do that in the kernel + # is to specify: + # make ...args... 
CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio + if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then + # The old style way of copying an prebuilt image and building it + # is turned on via INTIRAMFS_TASK != "" + copy_initramfs + use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio + fi + oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd + if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then + gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}" + fi +} + +do_compile_kernelmodules() { + unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE + if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then + oe_runmake ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} + else + bbnote "no modules to compile" + fi +} +addtask compile_kernelmodules after do_compile before do_strip + +kernel_do_install() { + # + # First install the modules + # + unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE + if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then + oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install + rm "${D}/lib/modules/${KERNEL_VERSION}/build" + rm "${D}/lib/modules/${KERNEL_VERSION}/source" + # If the kernel/ directory is empty remove it to prevent QA issues + rmdir --ignore-fail-on-non-empty "${D}/lib/modules/${KERNEL_VERSION}/kernel" + else + bbnote "no modules to install" + fi + + # + # Install various kernel output (zImage, map file, config, module support files) + # + install -d ${D}/${KERNEL_IMAGEDEST} + install -d ${D}/boot + install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} + install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION} + install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION} + install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION} + [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION} + install -d ${D}${sysconfdir}/modules-load.d + install -d ${D}${sysconfdir}/modprobe.d + + # + # Support for external module building - create a minimal copy of the + # kernel source tree. + # + kerneldir=${D}${KERNEL_SRC_PATH} + install -d $kerneldir + mkdir -p ${D}/lib/modules/${KERNEL_VERSION} + ln -sf ${KERNEL_SRC_PATH} "${D}/lib/modules/${KERNEL_VERSION}/build" + + # + # Store the kernel version in sysroots for module-base.bbclass + # + + echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion + + # + # Store kernel image name to allow use during image generation + # + + echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name + + # + # Copy the entire source tree. In case an external build directory is + # used, copy the build directory over first, then copy over the source + # dir. This ensures the original Makefiles are used and not the + # redirecting Makefiles in the build directory. + # + find . -depth -not -name "*.cmd" -not -name "*.o" -not -name "*.so.dbg" -not -name "*.so" -not -path "./Documentation*" -not -path "./source*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir + cp .config $kerneldir + if [ "${S}" != "${B}" ]; then + pwd="$PWD" + cd "${S}" + find . -depth -not -path "./Documentation*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir + cd "$pwd" + fi + + # Test to ensure that the output file and image type are not actually + # the same file. If hardlinking is used, they will be the same, and there's + # no need to install. + ! 
[ ${KERNEL_OUTPUT} -ef $kerneldir/${KERNEL_IMAGETYPE} ] && install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE} + install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION} + + # Dummy Makefile so the clean below works + mkdir $kerneldir/Documentation + touch $kerneldir/Documentation/Makefile + + # + # Clean and remove files not needed for building modules. + # Some distributions go through a lot more trouble to strip out + # unecessary headers, for now, we just prune the obvious bits. + # + # We don't want to leave host-arch binaries in /sysroots, so + # we clean the scripts dir while leaving the generated config + # and include files. + # + oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean _mrproper_scripts + + # hide directories that shouldn't have their .c, s and S files deleted + for d in tools scripts lib; do + mv $kerneldir/$d $kerneldir/.$d + done + + # delete .c, .s and .S files, unless we hid a directory as .. This technique is + # much faster than find -prune and -exec + find $kerneldir -not -path '*/\.*' -type f -name "*.[csS]" -delete + + # put the hidden dirs back + for d in tools scripts lib; do + mv $kerneldir/.$d $kerneldir/$d + done + + # As of Linux kernel version 3.0.1, the clean target removes + # arch/powerpc/lib/crtsavres.o which is present in + # KBUILD_LDFLAGS_MODULE, making it required to build external modules. + if [ ${ARCH} = "powerpc" ]; then + cp -l arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o + fi + + # Necessary for building modules like compat-wireless. + if [ -f include/generated/bounds.h ]; then + cp -l include/generated/bounds.h $kerneldir/include/generated/bounds.h + fi + if [ -d arch/${ARCH}/include/generated ]; then + mkdir -p $kerneldir/arch/${ARCH}/include/generated/ + cp -flR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/ + fi + + # Remove the following binaries which cause strip or arch QA errors + # during do_package for cross-compiled platforms + bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \ + arch/powerpc/boot/mktree scripts/kconfig/zconf.tab.o \ + scripts/kconfig/conf.o scripts/kconfig/kxgettext.o" + for entry in $bin_files; do + rm -f $kerneldir/$entry + done + + # kernels <2.6.30 don't have $kerneldir/tools directory so we check if it exists before calling sed + if [ -f $kerneldir/tools/perf/Makefile ]; then + # Fix SLANG_INC for slang.h + sed -i 's#-I/usr/include/slang#-I=/usr/include/slang#g' $kerneldir/tools/perf/Makefile + fi +} +do_install[prefuncs] += "package_get_auto_pr" + +python sysroot_stage_all () { + oe.path.copyhardlinktree(d.expand("${D}${KERNEL_SRC_PATH}"), d.expand("${SYSROOT_DESTDIR}${KERNEL_SRC_PATH}")) +} + +KERNEL_CONFIG_COMMAND ?= "oe_runmake_call oldnoconfig || yes '' | oe_runmake oldconfig" + +kernel_do_configure() { + # fixes extra + in /lib/modules/2.6.37+ + # $ scripts/setlocalversion . => + + # $ make kernelversion => 2.6.37 + # $ make kernelrelease => 2.6.37+ + touch ${B}/.scmversion ${S}/.scmversion + + # Copy defconfig to .config if .config does not exist. This allows + # recipes to manage the .config themselves in do_configure_prepend(). + if [ -f "${WORKDIR}/defconfig" ] && [ ! 
-f "${B}/.config" ]; then + cp "${WORKDIR}/defconfig" "${B}/.config" + fi + eval ${KERNEL_CONFIG_COMMAND} +} + +do_savedefconfig() { + oe_runmake savedefconfig +} +do_savedefconfig[nostamp] = "1" +addtask savedefconfig after do_configure + +inherit cml1 + +EXPORT_FUNCTIONS do_compile do_install do_configure + +# kernel-base becomes kernel-${KERNEL_VERSION} +# kernel-image becomes kernel-image-${KERNEL_VERISON} +PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules" +FILES_${PN} = "" +FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin" +FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*" +FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build" +FILES_kernel-vmlinux = "/boot/vmlinux*" +FILES_kernel-modules = "" +RDEPENDS_kernel = "kernel-base" +# Allow machines to override this dependency if kernel image files are +# not wanted in images as standard +RDEPENDS_kernel-base ?= "kernel-image" +PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}" +PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}" +RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}" +ALLOW_EMPTY_kernel = "1" +ALLOW_EMPTY_kernel-base = "1" +ALLOW_EMPTY_kernel-image = "1" +ALLOW_EMPTY_kernel-modules = "1" +DESCRIPTION_kernel-modules = "Kernel modules meta package" + +pkg_postinst_kernel-base () { + if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then + mkdir -p $D/lib/modules/${KERNEL_VERSION} + fi + if [ -n "$D" ]; then + depmodwrapper -a -b $D ${KERNEL_VERSION} + else + depmod -a ${KERNEL_VERSION} + fi +} + +pkg_postinst_kernel-image () { + update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true +} + +pkg_postrm_kernel-image () { + update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true +} + +PACKAGESPLITFUNCS_prepend = "split_kernel_packages " + +python split_kernel_packages () { + do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='') +} + +do_strip() { + if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then + if [ "${KERNEL_IMAGETYPE}" != "vmlinux" ]; then + bbwarn "image type will not be stripped (not supported): ${KERNEL_IMAGETYPE}" + return + fi + + cd ${B} + headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT} | \ + grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \ + sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \ + gawk '{print $1}'` + + for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do { + if [ "$headers" != *"$str"* ]; then + bbwarn "Section not found: $str"; + fi + + "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT} + }; done + + bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \ + "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" + fi; +} +do_strip[dirs] = "${B}" + +addtask do_strip before do_sizecheck after do_kernel_link_vmlinux + +# Support checking the kernel size since some kernels need to reside in partitions +# with a fixed length or there is a limit in transferring the kernel to memory +do_sizecheck() { + if [ ! 
-z "${KERNEL_IMAGE_MAXSIZE}" ]; then + invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'` + if [ -n "$invalid" ]; then + die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integerx (The unit is Kbytes)" + fi + size=`du -ks ${B}/${KERNEL_OUTPUT} | awk '{ print $1}'` + if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then + die "This kernel (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device. Please reduce the size of the kernel by making more of it modular." + fi + fi +} +do_sizecheck[dirs] = "${B}" + +addtask sizecheck before do_install after do_strip + +KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}" +# Don't include the DATETIME variable in the sstate package signatures +KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME" +KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}" +MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}" +MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME" +MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz" +# Don't include the DATETIME variable in the sstate package signatures +MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz" +MODULE_TARBALL_DEPLOY ?= "1" + +do_uboot_mkimage() { + if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then + if test "x${KEEPUIMAGE}" != "xyes" ; then + ENTRYPOINT=${UBOOT_ENTRYPOINT} + if test -n "${UBOOT_ENTRYSYMBOL}"; then + ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \ + awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'` + fi + if test -e arch/${ARCH}/boot/compressed/vmlinux ; then + ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin + uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C none -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage + rm -f linux.bin + else + ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin + rm -f linux.bin.gz + gzip -9 linux.bin + uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C gzip -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz arch/${ARCH}/boot/uImage + rm -f linux.bin.gz + fi + fi + fi +} + +addtask uboot_mkimage before do_install after do_compile + +kernel_do_deploy() { + install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin + if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then + mkdir -p ${D}/lib + tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib + ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME} + fi + + ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGE_SYMLINK_NAME}.bin + ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGETYPE} + + cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt + + cd ${B} + # Update deploy directory + if [ -e "${KERNEL_OUTPUT}.initramfs" ]; then + echo "Copying deploy kernel-initramfs image and setting up links..." 
+ initramfs_base_name=${INITRAMFS_BASE_NAME} + initramfs_symlink_name=${KERNEL_IMAGETYPE}-initramfs-${MACHINE} + install -m 0644 ${KERNEL_OUTPUT}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin + cd ${DEPLOYDIR} + ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin + fi +} +do_deploy[dirs] = "${DEPLOYDIR} ${B}" +do_deploy[prefuncs] += "package_get_auto_pr" + +addtask deploy before do_build after do_install + +EXPORT_FUNCTIONS do_deploy + diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass new file mode 100644 index 0000000000..8849f59042 --- /dev/null +++ b/meta/classes/lib_package.bbclass @@ -0,0 +1,7 @@ +# +# ${PN}-bin is defined in bitbake.conf +# +# We need to allow the other packages to be greedy with what they +# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy. +# +PACKAGE_BEFORE_PN = "${PN}-bin" diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass new file mode 100644 index 0000000000..bbc80167dd --- /dev/null +++ b/meta/classes/libc-common.bbclass @@ -0,0 +1,43 @@ +do_install() { + oe_runmake install_root=${D} install + for r in ${rpcsvc}; do + h=`echo $r|sed -e's,\.x$,.h,'` + install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/ + done + install -d ${D}/${sysconfdir}/ + install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/ + install -d ${D}${localedir} + make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED" + # get rid of some broken files... + for i in ${GLIBC_BROKEN_LOCALES}; do + grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp + mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED + done + rm -f ${D}${sysconfdir}/rpc + rm -rf ${D}${datadir}/zoneinfo + rm -rf ${D}${libexecdir}/getconf +} + +def get_libc_fpu_setting(bb, d): + if d.getVar('TARGET_FPU', True) in [ 'soft', 'ppc-efd' ]: + return "--without-fp" + return "" + +python populate_packages_prepend () { + if d.getVar('DEBIAN_NAMES', True): + pkgs = d.getVar('PACKAGES', True).split() + bpn = d.getVar('BPN', True) + prefix = d.getVar('MLPREFIX', True) or "" + # Set the base package... + d.setVar('PKG_' + prefix + bpn, prefix + 'libc6') + libcprefix = prefix + bpn + '-' + for p in pkgs: + # And all the subpackages. + if p.startswith(libcprefix): + renamed = p.replace(bpn, 'libc6', 1) + d.setVar('PKG_' + p, renamed) + # For backward compatibility with old -dbg package + d.appendVar('RPROVIDES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg') + d.appendVar('RCONFLICTS_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg') + d.appendVar('RREPLACES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg') +} diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass new file mode 100644 index 0000000000..c1bc399c18 --- /dev/null +++ b/meta/classes/libc-package.bbclass @@ -0,0 +1,390 @@ +# +# This class knows how to package up [e]glibc. Its shared since prebuild binary toolchains +# may need packaging and its pointless to duplicate this code. 
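+# Besides generating locales it also splits gconv modules, charmaps and
+# locale source data into individual runtime packages.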
+# +# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of: +# "compile" - Use QEMU to generate the binary locale files +# "precompiled" - The binary locale files are pregenerated and already present +# "ondevice" - The device will build the locale files upon first boot through the postinst + +GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice" + +python __anonymous () { + enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True) + + pn = d.getVar("PN", True) + if pn.endswith("-initial"): + enabled = False + + if enabled and int(enabled): + import re + + target_arch = d.getVar("TARGET_ARCH", True) + binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or "" + use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "" + + for regexp in binary_arches.split(" "): + r = re.compile(regexp) + + if r.match(target_arch): + depends = d.getVar("DEPENDS", True) + if use_cross_localedef == "1" : + depends = "%s cross-localedef-native" % depends + else: + depends = "%s qemu-native" % depends + d.setVar("DEPENDS", depends) + d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile") + break + + # try to fix disable charsets/locales/locale-code compile fail + if bb.utils.contains('DISTRO_FEATURES', 'libc-charsets', True, False, d) and \ + bb.utils.contains('DISTRO_FEATURES', 'libc-locales', True, False, d) and \ + bb.utils.contains('DISTRO_FEATURES', 'libc-locale-code', True, False, d): + d.setVar('PACKAGE_NO_GCONV', '0') + else: + d.setVar('PACKAGE_NO_GCONV', '1') +} + +OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}" + +do_configure_prepend() { + if [ -e ${S}/elf/ldd.bash.in ]; then + sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in + fi +} + + + +# indentation removed on purpose +locale_base_postinst() { +#!/bin/sh + +if [ "x$D" != "x" ]; then + exit 1 +fi + +rm -rf ${TMP_LOCALE} +mkdir -p ${TMP_LOCALE} +if [ -f ${localedir}/locale-archive ]; then + cp ${localedir}/locale-archive ${TMP_LOCALE}/ +fi +localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s +mkdir -p ${localedir}/ +mv ${TMP_LOCALE}/locale-archive ${localedir}/ +rm -rf ${TMP_LOCALE} +} + +# indentation removed on purpose +locale_base_postrm() { +#!/bin/sh + +rm -rf ${TMP_LOCALE} +mkdir -p ${TMP_LOCALE} +if [ -f ${localedir}/locale-archive ]; then + cp ${localedir}/locale-archive ${TMP_LOCALE}/ +fi +localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s +mv ${TMP_LOCALE}/locale-archive ${localedir}/ +rm -rf ${TMP_LOCALE} +} + + +TMP_LOCALE="/tmp/locale${localedir}" +LOCALETREESRC ?= "${PKGD}" + +do_prep_locale_tree() { + treedir=${WORKDIR}/locale-tree + rm -rf $treedir + mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir} + tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir} + # unzip to avoid parsing errors + for i in $treedir/${datadir}/i18n/charmaps/*gz; do + gunzip $i + done + tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . 
| tar -xf - -C $treedir/${base_libdir} + if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then + tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir} + fi + install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir} +} + +do_collect_bins_from_locale_tree() { + treedir=${WORKDIR}/locale-tree + + parent=$(dirname ${localedir}) + mkdir -p ${PKGD}/$parent + tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent +} + +inherit qemu + +python package_do_split_gconvs () { + import re + if (d.getVar('PACKAGE_NO_GCONV', True) == '1'): + bb.note("package requested not splitting gconvs") + return + + if not d.getVar('PACKAGES', True): + return + + mlprefix = d.getVar("MLPREFIX", True) or "" + + bpn = d.getVar('BPN', True) + libdir = d.getVar('libdir', True) + if not libdir: + bb.error("libdir not defined") + return + datadir = d.getVar('datadir', True) + if not datadir: + bb.error("datadir not defined") + return + + gconv_libdir = base_path_join(libdir, "gconv") + charmap_dir = base_path_join(datadir, "i18n", "charmaps") + locales_dir = base_path_join(datadir, "i18n", "locales") + binary_locales_dir = d.getVar('localedir', True) + + def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group): + deps = [] + f = open(fn, "rb") + c_re = re.compile('^copy "(.*)"') + i_re = re.compile('^include "(\w+)".*') + for l in f.readlines(): + m = c_re.match(l) or i_re.match(l) + if m: + dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1))) + if not dp in deps: + deps.append(dp) + f.close() + if deps != []: + d.setVar('RDEPENDS_%s' % pkg, " ".join(deps)) + if bpn != 'glibc': + d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc')) + + do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \ + description='gconv module for character set %s', hook=calc_gconv_deps, \ + extra_depends=bpn+'-gconv') + + def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group): + deps = [] + f = open(fn, "rb") + c_re = re.compile('^copy "(.*)"') + i_re = re.compile('^include "(\w+)".*') + for l in f.readlines(): + m = c_re.match(l) or i_re.match(l) + if m: + dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1))) + if not dp in deps: + deps.append(dp) + f.close() + if deps != []: + d.setVar('RDEPENDS_%s' % pkg, " ".join(deps)) + if bpn != 'glibc': + d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc')) + + do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \ + description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='') + + def calc_locale_deps(fn, pkg, file_regex, output_pattern, group): + deps = [] + f = open(fn, "rb") + c_re = re.compile('^copy "(.*)"') + i_re = re.compile('^include "(\w+)".*') + for l in f.readlines(): + m = c_re.match(l) or i_re.match(l) + if m: + dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1)) + if not dp in deps: + deps.append(dp) + f.close() + if deps != []: + d.setVar('RDEPENDS_%s' % pkg, " ".join(deps)) + if bpn != 'glibc': + d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc')) + + do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \ + description='locale definition for %s', hook=calc_locale_deps, extra_depends='') + d.setVar('PACKAGES', d.getVar('PACKAGES') + ' ' + d.getVar('MLPREFIX') + bpn + '-gconv') + + use_bin = 
d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True) + + dot_re = re.compile("(.*)\.(.*)") + + # Read in supported locales and associated encodings + supported = {} + with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f: + for line in f.readlines(): + try: + locale, charset = line.rstrip().split() + except ValueError: + continue + supported[locale] = charset + + # GLIBC_GENERATE_LOCALES var specifies which locales to be generated. empty or "all" means all locales + to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True) + if not to_generate or to_generate == 'all': + to_generate = supported.keys() + else: + to_generate = to_generate.split() + for locale in to_generate: + if locale not in supported: + if '.' in locale: + charset = locale.split('.')[1] + else: + charset = 'UTF-8' + bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset)) + supported[locale] = charset + + def output_locale_source(name, pkgname, locale, encoding): + d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \ + (mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding))) + d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \ + % (locale, encoding, locale)) + d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \ + (locale, encoding, locale)) + + def output_locale_binary_rdepends(name, pkgname, locale, encoding): + m = re.match("(.*)\.(.*)", name) + if m: + libc_name = "%s.%s" % (m.group(1), m.group(2).lower()) + else: + libc_name = name + d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \ + % (mlprefix+bpn, libc_name))) + + commands = {} + + def output_locale_binary(name, pkgname, locale, encoding): + treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree") + ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True)) + path = d.getVar("PATH", True) + i18npath = base_path_join(treedir, datadir, "i18n") + gconvpath = base_path_join(treedir, "iconvdata") + outputpath = base_path_join(treedir, binary_locales_dir) + + use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0" + if use_cross_localedef == "1": + target_arch = d.getVar('TARGET_ARCH', True) + locale_arch_options = { \ + "arm": " --uint32-align=4 --little-endian ", \ + "armeb": " --uint32-align=4 --big-endian ", \ + "aarch64_be": " --uint32-align=4 --big-endian ", \ + "sh4": " --uint32-align=4 --big-endian ", \ + "powerpc": " --uint32-align=4 --big-endian ", \ + "powerpc64": " --uint32-align=4 --big-endian ", \ + "mips": " --uint32-align=4 --big-endian ", \ + "mips64": " --uint32-align=4 --big-endian ", \ + "mipsel": " --uint32-align=4 --little-endian ", \ + "mips64el":" --uint32-align=4 --little-endian ", \ + "i586": " --uint32-align=4 --little-endian ", \ + "i686": " --uint32-align=4 --little-endian ", \ + "x86_64": " --uint32-align=4 --little-endian " } + + if target_arch in locale_arch_options: + localedef_opts = locale_arch_options[target_arch] + else: + bb.error("locale_arch_options not found for target_arch=" + target_arch) + raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options") + + localedef_opts += " --force --old-style --no-archive --prefix=%s \ + --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \ + % (treedir, treedir, datadir, locale, encoding, outputpath, name) + + cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \ + (path, i18npath, gconvpath, localedef_opts) + 
else: # earlier slower qemu way + qemu = qemu_target_binary(d) + localedef_opts = "--force --old-style --no-archive --prefix=%s \ + --inputfile=%s/i18n/locales/%s --charmap=%s %s" \ + % (treedir, datadir, locale, encoding, name) + + qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) + if not qemu_options: + qemu_options = d.getVar('QEMU_OPTIONS', True) + + cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \ + -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \ + (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts) + + commands["%s/%s" % (outputpath, name)] = cmd + + bb.note("generating locale %s (%s)" % (locale, encoding)) + + def output_locale(name, locale, encoding): + pkgname = d.getVar('MLPREFIX') + 'locale-base-' + legitimize_package_name(name) + d.setVar('ALLOW_EMPTY_%s' % pkgname, '1') + d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True))) + rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name)) + m = re.match("(.*)_(.*)", name) + if m: + rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1)) + d.setVar('RPROVIDES_%s' % pkgname, rprovides) + + if use_bin == "compile": + output_locale_binary_rdepends(name, pkgname, locale, encoding) + output_locale_binary(name, pkgname, locale, encoding) + elif use_bin == "precompiled": + output_locale_binary_rdepends(name, pkgname, locale, encoding) + else: + output_locale_source(name, pkgname, locale, encoding) + + if use_bin == "compile": + bb.note("preparing tree for binary locale generation") + bb.build.exec_func("do_prep_locale_tree", d) + + utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0) + encodings = {} + for locale in to_generate: + charset = supported[locale] + if utf8_only and charset != 'UTF-8': + continue + + m = dot_re.match(locale) + if m: + base = m.group(1) + else: + base = locale + + # Precompiled locales are kept as is, obeying SUPPORTED, while + # others are adjusted, ensuring that the non-suffixed locales + # are utf-8, while the suffixed are not. + if use_bin == "precompiled": + output_locale(locale, base, charset) + else: + if charset == 'UTF-8': + output_locale(base, base, charset) + else: + output_locale('%s.%s' % (base, charset), base, charset) + + if use_bin == "compile": + makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile") + m = open(makefile, "w") + m.write("all: %s\n\n" % " ".join(commands.keys())) + for cmd in commands: + m.write(cmd + ":\n") + m.write("\t" + commands[cmd] + "\n\n") + m.close() + d.setVar("B", os.path.dirname(makefile)) + d.setVar("EXTRA_OEMAKE", "${PARALLEL_MAKE}") + bb.note("Executing binary locale generation makefile") + bb.build.exec_func("oe_runmake", d) + bb.note("collecting binary locales from locale tree") + bb.build.exec_func("do_collect_bins_from_locale_tree", d) + do_split_packages(d, binary_locales_dir, file_regex='(.*)', \ + output_pattern=bpn+'-binary-localedata-%s', \ + description='binary locale definition for %s', extra_depends='', allow_dirs=True) + elif use_bin == "precompiled": + do_split_packages(d, binary_locales_dir, file_regex='(.*)', \ + output_pattern=bpn+'-binary-localedata-%s', \ + description='binary locale definition for %s', extra_depends='', allow_dirs=True) + else: + bb.note("generation of binary locales disabled. 
this may break i18n!") + +} + +# We want to do this indirection so that we can safely 'return' +# from the called function even though we're prepending +python populate_packages_prepend () { + bb.build.exec_func('package_do_split_gconvs', d) +} + diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass new file mode 100644 index 0000000000..69e8f12cba --- /dev/null +++ b/meta/classes/license.bbclass @@ -0,0 +1,397 @@ +# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by +# LIC_FILES_CHKSUM. +# TODO: +# - There is a real issue revolving around license naming standards. + +LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses" +LICSSTATEDIR = "${WORKDIR}/license-destdir/" + +# Create extra package with license texts and add it to RRECOMMENDS_${PN} +LICENSE_CREATE_PACKAGE[type] = "boolean" +LICENSE_CREATE_PACKAGE ??= "0" +LICENSE_PACKAGE_SUFFIX ??= "-lic" +LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/" + +addtask populate_lic after do_patch before do_build +do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}" +do_populate_lic[cleandirs] = "${LICSSTATEDIR}" + +python write_package_manifest() { + # Get list of installed packages + license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}') + bb.utils.mkdirhier(license_image_dir) + from oe.rootfs import image_list_installed_packages + open(os.path.join(license_image_dir, 'package.manifest'), + 'w+').write(image_list_installed_packages(d)) +} + +license_create_manifest() { + # Test if BUILD_IMAGES_FROM_FEEDS is defined in env + if [ -n "${BUILD_IMAGES_FROM_FEEDS}" ]; then + exit 0 + fi + + INSTALLED_PKGS=`cat ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest` + LICENSE_MANIFEST="${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest" + # remove existing license.manifest file + if [ -f ${LICENSE_MANIFEST} ]; then + rm ${LICENSE_MANIFEST} + fi + touch ${LICENSE_MANIFEST} + for pkg in ${INSTALLED_PKGS}; do + filename=`ls ${PKGDATA_DIR}/runtime-reverse/${pkg}| head -1` + pkged_pn="$(sed -n 's/^PN: //p' ${filename})" + + # check to see if the package name exists in the manifest. if so, bail. + if grep -q "^PACKAGE NAME: ${pkg}" ${LICENSE_MANIFEST}; then + continue + fi + + pkged_pv="$(sed -n 's/^PV: //p' ${filename})" + pkged_name="$(basename $(readlink ${filename}))" + pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})" + if [ -z ${pkged_lic} ]; then + # fallback checking value of LICENSE + pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})" + fi + + echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_MANIFEST} + echo "PACKAGE VERSION:" ${pkged_pv} >> ${LICENSE_MANIFEST} + echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_MANIFEST} + printf "LICENSE:" >> ${LICENSE_MANIFEST} + for lic in ${pkged_lic}; do + # to reference a license file trim trailing + symbol + if ! 
[ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic%+}" ]; then + bbwarn "The license listed ${lic} was not in the licenses collected for ${pkged_pn}" + fi + printf " ${lic}" >> ${LICENSE_MANIFEST} + done + printf "\n\n" >> ${LICENSE_MANIFEST} + done + + # Two options here: + # - Just copy the manifest + # - Copy the manifest and the license directories + # With both options set we see a .5 M increase in core-image-minimal + if [ "${COPY_LIC_MANIFEST}" = "1" ]; then + mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/ + cp ${LICENSE_MANIFEST} ${IMAGE_ROOTFS}/usr/share/common-licenses/license.manifest + if [ "${COPY_LIC_DIRS}" = "1" ]; then + for pkg in ${INSTALLED_PKGS}; do + mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg} + pkged_pn="$(oe-pkgdata-util lookup-recipe ${PKGDATA_DIR} ${pkg})" + for lic in `ls ${LICENSE_DIRECTORY}/${pkged_pn}`; do + # Really don't need to copy the generics as they're + # represented in the manifest and in the actual pkg licenses + # Doing so would make your image quite a bit larger + if [ "${lic#generic_}" = "${lic}" ]; then + cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic} + else + if [ ! -f ${IMAGE_ROOTFS}/usr/share/common-licenses/${lic} ]; then + cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/ + fi + ln -sf ../${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic} + fi + done + done + fi + fi + +} + +python do_populate_lic() { + """ + Populate LICENSE_DIRECTORY with licenses. + """ + lic_files_paths = find_license_files(d) + + # The base directory we wrangle licenses to + destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True)) + copy_license_files(lic_files_paths, destdir) +} + +# it would be better to copy them in do_install_append, but find_license_filesa is python +python perform_packagecopy_prepend () { + enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d) + if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled: + lic_files_paths = find_license_files(d) + + # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY + destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True)) + copy_license_files(lic_files_paths, destdir) + add_package_and_files(d) +} + +def add_package_and_files(d): + packages = d.getVar('PACKAGES', True) + files = d.getVar('LICENSE_FILES_DIRECTORY', True) + pn = d.getVar('PN', True) + pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX')) + if pn_lic in packages: + bb.warn("%s package already existed in %s." 
% (pn_lic, pn)) + else: + # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY + d.setVar('PACKAGES', "%s %s" % (pn_lic, packages)) + d.setVar('FILES_' + pn_lic, files) + rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True) + if rrecommends_pn: + d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn)) + else: + d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic)) + +def copy_license_files(lic_files_paths, destdir): + import shutil + import stat + + bb.utils.mkdirhier(destdir) + for (basename, path) in lic_files_paths: + try: + src = path + dst = os.path.join(destdir, basename) + if os.path.exists(dst): + os.remove(dst) + if (os.stat(src).st_dev == os.stat(destdir).st_dev): + os.link(src, dst) + else: + shutil.copyfile(src, dst) + os.chmod(dst, os.stat(dst).st_mode | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH); + except Exception as e: + bb.warn("Could not copy license file %s: %s" % (basename, e)) + +def find_license_files(d): + """ + Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files. + """ + import shutil + import oe.license + + pn = d.getVar('PN', True) + for package in d.getVar('PACKAGES', True): + if d.getVar('LICENSE_' + package, True): + license_types = license_types + ' & ' + \ + d.getVar('LICENSE_' + package, True) + + #If we get here with no license types, then that means we have a recipe + #level license. If so, we grab only those. + try: + license_types + except NameError: + # All the license types at the recipe level + license_types = d.getVar('LICENSE', True) + + # All the license files for the package + lic_files = d.getVar('LIC_FILES_CHKSUM', True) + pn = d.getVar('PN', True) + # The license files are located in S/LIC_FILE_CHECKSUM. + srcdir = d.getVar('S', True) + # Directory we store the generic licenses as set in the distro configuration + generic_directory = d.getVar('COMMON_LICENSE_DIR', True) + # List of basename, path tuples + lic_files_paths = [] + license_source_dirs = [] + license_source_dirs.append(generic_directory) + try: + additional_lic_dirs = d.getVar('LICENSE_PATH', True).split() + for lic_dir in additional_lic_dirs: + license_source_dirs.append(lic_dir) + except: + pass + + class FindVisitor(oe.license.LicenseVisitor): + def visit_Str(self, node): + # + # Until I figure out what to do with + # the two modifiers I support (or greater = + + # and "with exceptions" being * + # we'll just strip out the modifier and put + # the base license. + find_license(node.s.replace("+", "").replace("*", "")) + self.generic_visit(node) + + def find_license(license_type): + try: + bb.utils.mkdirhier(gen_lic_dest) + except: + pass + spdx_generic = None + license_source = None + # If the generic does not exist we need to check to see if there is an SPDX mapping to it + for lic_dir in license_source_dirs: + if not os.path.isfile(os.path.join(lic_dir, license_type)): + if d.getVarFlag('SPDXLICENSEMAP', license_type) != None: + # Great, there is an SPDXLICENSEMAP. We can copy! + bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type)) + spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type) + license_source = lic_dir + break + elif os.path.isfile(os.path.join(lic_dir, license_type)): + spdx_generic = license_type + license_source = lic_dir + break + + if spdx_generic and license_source: + # we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest + # audit up. 
This should be fixed in emit_pkgdata (or, we actually got and fix all the recipes) + + lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic))) + else: + # And here is where we warn people that their licenses are lousy + bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type)) + pass + + if not generic_directory: + raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config") + + if not lic_files: + # No recipe should have an invalid license file. This is checked else + # where, but let's be pedantic + bb.note(pn + ": Recipe file does not have license file information.") + return lic_files_paths + + for url in lic_files.split(): + try: + (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url) + except bb.fetch.MalformedUrl: + raise bb.build.FuncFailed("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url)) + # We want the license filename and path + srclicfile = os.path.join(srcdir, path) + lic_files_paths.append((os.path.basename(path), srclicfile)) + + v = FindVisitor() + try: + v.visit_string(license_types) + except oe.license.InvalidLicense as exc: + bb.fatal('%s: %s' % (d.getVar('PF', True), exc)) + except SyntaxError: + bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF', True))) + + return lic_files_paths + +def return_spdx(d, license): + """ + This function returns the spdx mapping of a license if it exists. + """ + return d.getVarFlag('SPDXLICENSEMAP', license, True) + +def canonical_license(d, license): + """ + Return the canonical (SPDX) form of the license if available (so GPLv3 + becomes GPL-3.0), for the license named 'X+', return canonical form of + 'X' if availabel and the tailing '+' (so GPLv3+ becomes GPL-3.0+), + or the passed license if there is no canonical form. + """ + lic = d.getVarFlag('SPDXLICENSEMAP', license, True) or "" + if not lic and license.endswith('+'): + lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'), True) + if lic: + lic += '+' + return lic or license + +def incompatible_license(d, dont_want_licenses, package=None): + """ + This function checks if a recipe has only incompatible licenses. It also + take into consideration 'or' operand. dont_want_licenses should be passed + as canonical (SPDX) names. + """ + import re + import oe.license + from fnmatch import fnmatchcase as fnmatch + license = d.getVar("LICENSE_%s" % package, True) if package else None + if not license: + license = d.getVar('LICENSE', True) + + def license_ok(license): + for dwl in dont_want_licenses: + # If you want to exclude license named generically 'X', we + # surely want to exclude 'X+' as well. In consequence, we + # will exclude a trailing '+' character from LICENSE in + # case INCOMPATIBLE_LICENSE is not a 'X+' license. + lic = license + if not re.search('\+$', dwl): + lic = re.sub('\+', '', license) + if fnmatch(lic, dwl): + return False + return True + + # Handles an "or" or two license sets provided by + # flattened_licenses(), pick one that works if possible. + def choose_lic_set(a, b): + return a if all(license_ok(lic) for lic in a) else b + + try: + licenses = oe.license.flattened_licenses(license, choose_lic_set) + except oe.license.LicenseError as exc: + bb.fatal('%s: %s' % (d.getVar('P', True), exc)) + return any(not license_ok(canonical_license(d, l)) for l in licenses) + +def check_license_flags(d): + """ + This function checks if a recipe has any LICENSE_FLAGS that + aren't whitelisted. 
+ + If it does, it returns the first LICENSE_FLAGS item missing from the + whitelist, or all of the LICENSE_FLAGS if there is no whitelist. + + If everything is is properly whitelisted, it returns None. + """ + + def license_flag_matches(flag, whitelist, pn): + """ + Return True if flag matches something in whitelist, None if not. + + Before we test a flag against the whitelist, we append _${PN} + to it. We then try to match that string against the + whitelist. This covers the normal case, where we expect + LICENSE_FLAGS to be a simple string like 'commercial', which + the user typically matches exactly in the whitelist by + explicitly appending the package name e.g 'commercial_foo'. + If we fail the match however, we then split the flag across + '_' and append each fragment and test until we either match or + run out of fragments. + """ + flag_pn = ("%s_%s" % (flag, pn)) + for candidate in whitelist: + if flag_pn == candidate: + return True + + flag_cur = "" + flagments = flag_pn.split("_") + flagments.pop() # we've already tested the full string + for flagment in flagments: + if flag_cur: + flag_cur += "_" + flag_cur += flagment + for candidate in whitelist: + if flag_cur == candidate: + return True + return False + + def all_license_flags_match(license_flags, whitelist): + """ Return first unmatched flag, None if all flags match """ + pn = d.getVar('PN', True) + split_whitelist = whitelist.split() + for flag in license_flags.split(): + if not license_flag_matches(flag, split_whitelist, pn): + return flag + return None + + license_flags = d.getVar('LICENSE_FLAGS', True) + if license_flags: + whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True) + if not whitelist: + return license_flags + unmatched_flag = all_license_flags_match(license_flags, whitelist) + if unmatched_flag: + return unmatched_flag + return None + +SSTATETASKS += "do_populate_lic" +do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}" +do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/" + +ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; " + +python do_populate_lic_setscene () { + sstate_setscene(d) +} +addtask do_populate_lic_setscene diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass new file mode 100644 index 0000000000..4f2b0a4a98 --- /dev/null +++ b/meta/classes/linux-kernel-base.bbclass @@ -0,0 +1,32 @@ +# parse kernel ABI version out of +def get_kernelversion(p): + import re + + fn = p + '/include/linux/utsrelease.h' + if not os.path.isfile(fn): + # after 2.6.33-rc1 + fn = p + '/include/generated/utsrelease.h' + if not os.path.isfile(fn): + fn = p + '/include/linux/version.h' + + import re + try: + f = open(fn, 'r') + except IOError: + return None + + l = f.readlines() + f.close() + r = re.compile("#define UTS_RELEASE \"(.*)\"") + for s in l: + m = r.match(s) + if m: + return m.group(1) + return None + +def linux_module_packages(s, d): + suffix = "" + return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split())) + +# that's all + diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass new file mode 100644 index 0000000000..78d65bda3a --- /dev/null +++ b/meta/classes/logging.bbclass @@ -0,0 +1,72 @@ +# The following logging mechanisms are to be used in bash functions of recipes. +# They are intended to map one to one in intention and output format with the +# python recipe logging functions of a similar naming convention: bb.plain(), +# bb.note(), etc. 
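+# A recipe's shell task can call these directly, for example
+#   bbnote "generating default configuration"
+#   bbfatal "defconfig not found"
+# just as Python tasks would call bb.note() or bb.fatal().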
+# +# For the time being, all of these print only to the task logs. Future +# enhancements may integrate these calls with the bitbake logging +# infrastructure, allowing for printing to the console as appropriate. The +# interface and intention statements reflect that future goal. Once it is +# in place, no changes will be necessary to recipes using these logging +# mechanisms. + +# Print the output exactly as it is passed in. Typically used for output of +# tasks that should be seen on the console. Use sparingly. +# Output: logs console +# NOTE: console output is not currently implemented. +bbplain() { + echo "$*" +} + +# Notify the user of a noteworthy condition. +# Output: logs console +# NOTE: console output is not currently implemented. +bbnote() { + echo "NOTE: $*" +} + +# Print a warning to the log. Warnings are non-fatal, and do not +# indicate a build failure. +# Output: logs +bbwarn() { + echo "WARNING: $*" +} + +# Print an error to the log. Errors are non-fatal in that the build can +# continue, but they do indicate a build failure. +# Output: logs +bberror() { + echo "ERROR: $*" +} + +# Print a fatal error to the log. Fatal errors indicate build failure +# and halt the build, exiting with an error code. +# Output: logs +bbfatal() { + echo "ERROR: $*" + exit 1 +} + +# Print debug messages. These are appropriate for progress checkpoint +# messages to the logs. Depending on the debug log level, they may also +# go to the console. +# Output: logs console +# Usage: bbdebug 1 "first level debug message" +# bbdebug 2 "second level debug message" +# NOTE: console output is not currently implemented. +bbdebug() { + USAGE='Usage: bbdebug [123] "message"' + if [ $# -lt 2 ]; then + bbfatal "$USAGE" + fi + + # Strip off the debug level and ensure it is an integer + DBGLVL=$1; shift + if ! 
[[ "$DBGLVL" =~ ^[0-9]+ ]]; then + bbfatal "$USAGE" + fi + + # All debug output is printed to the logs + echo "DEBUG: $*" +} + diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass new file mode 100644 index 0000000000..5e6890238b --- /dev/null +++ b/meta/classes/meta.bbclass @@ -0,0 +1,4 @@ + +PACKAGES = "" + +do_build[recrdeptask] = "do_build" diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass new file mode 100644 index 0000000000..237e61821d --- /dev/null +++ b/meta/classes/metadata_scm.bbclass @@ -0,0 +1,82 @@ +METADATA_BRANCH ?= "${@base_detect_branch(d)}" +METADATA_REVISION ?= "${@base_detect_revision(d)}" + +def base_detect_revision(d): + path = base_get_scmbasepath(d) + + scms = [base_get_metadata_git_revision, \ + base_get_metadata_svn_revision] + + for scm in scms: + rev = scm(path, d) + if rev != "": + return rev + + return "" + +def base_detect_branch(d): + path = base_get_scmbasepath(d) + + scms = [base_get_metadata_git_branch] + + for scm in scms: + rev = scm(path, d) + if rev != "": + return rev.strip() + + return "" + +def base_get_scmbasepath(d): + return d.getVar( 'COREBASE', True) + +def base_get_metadata_monotone_branch(path, d): + monotone_branch = "" + try: + with open("%s/_MTN/options" % path) as f: + monotone_branch = f.read().strip() + if monotone_branch.startswith( "database" ): + monotone_branch_words = monotone_branch.split() + monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1] + except: + pass + return monotone_branch + +def base_get_metadata_monotone_revision(path, d): + monotone_revision = "" + try: + with open("%s/_MTN/revision" % path) as f: + monotone_revision = f.read().strip() + if monotone_revision.startswith( "format_version" ): + monotone_revision_words = monotone_revision.split() + monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1] + except IOError: + pass + return monotone_revision + +def base_get_metadata_svn_revision(path, d): + # This only works with older subversion. 
For newer versions + # this function will need to be fixed by someone interested + revision = "" + try: + with open("%s/.svn/entries" % path) as f: + revision = f.readlines()[3].strip() + except (IOError, IndexError): + pass + return revision + +def base_get_metadata_git_branch(path, d): + branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read() + + if len(branch) != 0: + return branch + return "" + +def base_get_metadata_git_revision(path, d): + f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path) + data = f.read() + if f.close() is None: + rev = data.split(" ")[0] + if len(rev) != 0: + return rev + return "" + diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass new file mode 100644 index 0000000000..aa0df8bb76 --- /dev/null +++ b/meta/classes/migrate_localcount.bbclass @@ -0,0 +1,46 @@ +PRSERV_DUMPDIR ??= "${LOG_DIR}/db" +LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc" + +python migrate_localcount_handler () { + import bb.event + if not e.data: + return + + pv = e.data.getVar('PV', True) + if not 'AUTOINC' in pv: + return + + localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data) + pn = e.data.getVar('PN', True) + revs = localcounts.get_by_pattern('%%-%s_rev' % pn) + counts = localcounts.get_by_pattern('%%-%s_count' % pn) + if not revs or not counts: + return + + if len(revs) != len(counts): + bb.warn("The number of revs and localcounts don't match in %s" % pn) + return + + version = e.data.getVar('PRAUTOINX', True) + srcrev = bb.fetch2.get_srcrev(e.data) + base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)] + pkgarch = e.data.getVar('PACKAGE_ARCH', True) + value = max(int(count) for count in counts) + + if len(revs) == 1: + if srcrev != ('AUTOINC+%s' % revs[0]): + value += 1 + else: + value += 1 + + bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True)) + df = e.data.getVar('LOCALCOUNT_DUMPFILE', True) + flock = bb.utils.lockfile("%s.lock" % df) + with open(df, 'a') as fd: + fd.write('PRAUTO$%s$%s$%s = "%s"\n' % + (base_ver, pkgarch, srcrev, str(value))) + bb.utils.unlockfile(flock) +} + +addhandler migrate_localcount_handler +migrate_localcount_handler[eventmask] = "bb.event.RecipeParsed" diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass new file mode 100644 index 0000000000..721c73fcff --- /dev/null +++ b/meta/classes/mime.bbclass @@ -0,0 +1,56 @@ +DEPENDS += "shared-mime-info-native shared-mime-info" + +mime_postinst() { +if [ "$1" = configure ]; then + UPDATEMIMEDB=`which update-mime-database` + if [ -x "$UPDATEMIMEDB" ] ; then + echo "Updating MIME database... this may take a while." + $UPDATEMIMEDB $D${datadir}/mime + else + echo "Missing update-mime-database, update of mime database failed!" + exit 1 + fi +fi +} + +mime_postrm() { +if [ "$1" = remove ] || [ "$1" = upgrade ]; then + UPDATEMIMEDB=`which update-mime-database` + if [ -x "$UPDATEMIMEDB" ] ; then + echo "Updating MIME database... this may take a while." + $UPDATEMIMEDB $D${datadir}/mime + else + echo "Missing update-mime-database, update of mime database failed!" 
+ exit 1 + fi +fi +} + +python populate_packages_append () { + import re + packages = d.getVar('PACKAGES', True).split() + pkgdest = d.getVar('PKGDEST', True) + + for pkg in packages: + mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg) + mimes = [] + mime_re = re.compile(".*\.xml$") + if os.path.exists(mime_dir): + for f in os.listdir(mime_dir): + if mime_re.match(f): + mimes.append(f) + if mimes: + bb.note("adding mime postinst and postrm scripts to %s" % pkg) + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('mime_postinst', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + postrm = d.getVar('pkg_postrm_%s' % pkg, True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += d.getVar('mime_postrm', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) + bb.note("adding shared-mime-info-data dependency to %s" % pkg) + d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data") +} diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass new file mode 100644 index 0000000000..57fb90df5e --- /dev/null +++ b/meta/classes/mirrors.bbclass @@ -0,0 +1,82 @@ +MIRRORS += "\ +${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \ +${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \ +${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \ +${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \ +${KERNELORG_MIRROR} http://www.kernel.org/pub \n \ +ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \ +ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \ +ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \ +ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \ +ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \ +ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \ +ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \ +ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/ \n \ +ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/ \n \ +ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/ \n \ +http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \ +http://ftp.info-zip.org/pub/infozip/src/ 
ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \ +ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \ +http://www.apache.org/dist http://archive.apache.org/dist \n \ +http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \ +${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \ +${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \ +cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +svk://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \ +cvs://.*/.* http://sources.openembedded.org/ \n \ +svn://.*/.* http://sources.openembedded.org/ \n \ +git://.*/.* http://sources.openembedded.org/ \n \ +hg://.*/.* http://sources.openembedded.org/ \n \ +bzr://.*/.* http://sources.openembedded.org/ \n \ +svk://.*/.* http://sources.openembedded.org/ \n \ +p4://.*/.* http://sources.openembedded.org/ \n \ +osc://.*/.* http://sources.openembedded.org/ \n \ +https?$://.*/.* http://sources.openembedded.org/ \n \ +ftp://.*/.* http://sources.openembedded.org/ \n \ +${CPAN_MIRROR} http://cpan.metacpan.org/ \n \ +${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \ +cvs://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +svn://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +git://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +hg://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +bzr://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +svk://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +p4://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +osc://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +https?$://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +ftp://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \ +" diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass new file mode 100644 index 0000000000..9537ba9f43 --- /dev/null +++ b/meta/classes/module-base.bbclass @@ -0,0 +1,18 @@ +inherit 
kernel-arch + +export OS = "${TARGET_OS}" +export CROSS_COMPILE = "${TARGET_PREFIX}" + +export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}" +KERNEL_OBJECT_SUFFIX = ".ko" + +# kernel modules are generally machine specific +PACKAGE_ARCH = "${MACHINE_ARCH}" + +# Function to ensure the kernel scripts are created. Expected to +# be called before do_compile. See module.bbclass for an example. +do_make_scripts() { + unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS + make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \ + -C ${STAGING_KERNEL_DIR} scripts +} diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass new file mode 100644 index 0000000000..ad6f7af1bb --- /dev/null +++ b/meta/classes/module.bbclass @@ -0,0 +1,32 @@ +DEPENDS += "virtual/kernel" + +inherit module-base kernel-module-split + +addtask make_scripts after do_patch before do_compile +do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock" +do_make_scripts[deptask] = "do_populate_sysroot" + +module_do_compile() { + unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS + oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \ + KERNEL_SRC=${STAGING_KERNEL_DIR} \ + KERNEL_VERSION=${KERNEL_VERSION} \ + CC="${KERNEL_CC}" LD="${KERNEL_LD}" \ + AR="${KERNEL_AR}" \ + ${MAKE_TARGETS} +} + +module_do_install() { + unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS + oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \ + KERNEL_SRC=${STAGING_KERNEL_DIR} \ + CC="${KERNEL_CC}" LD="${KERNEL_LD}" \ + modules_install +} + +EXPORT_FUNCTIONS do_compile do_install + +# add all split modules to PN RDEPENDS, PN can be empty now +KERNEL_MODULES_META_PACKAGE = "${PN}" +FILES_${PN} = "" +ALLOW_EMPTY_${PN} = "1" diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass new file mode 100644 index 0000000000..eea2fd59a1 --- /dev/null +++ b/meta/classes/multilib.bbclass @@ -0,0 +1,145 @@ +python multilib_virtclass_handler () { + cls = e.data.getVar("BBEXTENDCURR", True) + variant = e.data.getVar("BBEXTENDVARIANT", True) + if cls != "multilib" or not variant: + return + + e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True)) + + # There should only be one kernel in multilib configs + # We also skip multilib setup for module packages.
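+ # A variant such as "lib32" reaches this handler when multilib is enabled in the + # configuration, for example (illustrative values): + # require conf/multilib.conf + # MULTILIBS = "multilib:lib32" + # DEFAULTTUNE_virtclass-multilib-lib32 = "x86"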
+ provides = (e.data.getVar("PROVIDES", True) or "").split() + if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data): + raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel") + + save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or "" + for name in save_var_name.split(): + val=e.data.getVar(name, True) + if val: + e.data.setVar(name + "_MULTILIB_ORIGINAL", val) + + if bb.data.inherits_class('image', e.data): + e.data.setVar("MLPREFIX", variant + "-") + e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False)) + target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False) + if target_vendor: + e.data.setVar("TARGET_VENDOR", target_vendor) + return + + if bb.data.inherits_class('cross-canadian', e.data): + e.data.setVar("MLPREFIX", variant + "-") + override = ":virtclass-multilib-" + variant + e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override) + bb.data.update_data(e.data) + return + + if bb.data.inherits_class('native', e.data): + raise bb.parse.SkipPackage("We can't extend native recipes") + + if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data): + raise bb.parse.SkipPackage("We can't extend nativesdk recipes") + + if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data): + raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups") + + + # Expand this since this won't work correctly once we set a multilib into place + e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)) + + override = ":virtclass-multilib-" + variant + + e.data.setVar("MLPREFIX", variant + "-") + e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False)) + e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override) + + # Expand the WHITELISTs with multilib prefix + for whitelist in ["HOSTTOOLS_WHITELIST_GPL-3.0", "WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]: + pkgs = e.data.getVar(whitelist, True) + for pkg in pkgs.split(): + pkgs += " " + variant + "-" + pkg + e.data.setVar(whitelist, pkgs) + + # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data + newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False) + if newtune: + e.data.setVar("DEFAULTTUNE", newtune) + e.data.setVar('DEFAULTTUNE_ML_%s' % variant, newtune) +} + +addhandler multilib_virtclass_handler +multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise" + +STAGINGCC_prepend = "${BBEXTENDVARIANT}-" + +python __anonymous () { + variant = d.getVar("BBEXTENDVARIANT", True) + + import oe.classextend + + clsextend = oe.classextend.ClassExtender(variant, d) + + if bb.data.inherits_class('image', d): + clsextend.map_depends_variable("PACKAGE_INSTALL") + clsextend.map_depends_variable("LINGUAS_INSTALL") + clsextend.map_depends_variable("RDEPENDS") + pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True) + d.setVar("PACKAGE_INSTALL", pinstall) + d.setVar("LINGUAS_INSTALL", "") + # FIXME, we need to map this to something, not delete it! 
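+ # (mapping here would mean giving each entry the multilib prefix, as map_depends_variable + # does for PACKAGE_INSTALL above; for now the list is simply cleared)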
+ d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "") + + if bb.data.inherits_class('populate_sdk_base', d): + clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK") + clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY") + + if bb.data.inherits_class('image', d): + return + + clsextend.map_depends_variable("DEPENDS") + clsextend.map_variable("PROVIDES") + + if bb.data.inherits_class('cross-canadian', d): + return + + clsextend.rename_packages() + clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split()) + + clsextend.map_packagevars() + clsextend.map_regexp_variable("PACKAGES_DYNAMIC") + clsextend.map_variable("PACKAGE_INSTALL") + clsextend.map_variable("INITSCRIPT_PACKAGES") + clsextend.map_variable("USERADD_PACKAGES") + clsextend.map_variable("SYSTEMD_PACKAGES") +} + +PACKAGEFUNCS_append = " do_package_qa_multilib" + +python do_package_qa_multilib() { + + def check_mlprefix(pkg, var, mlprefix): + values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "") + candidates = [] + for i in values: + if i.startswith('virtual/'): + i = i[len('virtual/'):] + if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \ + (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \ + (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')): + candidates.append(i) + if len(candidates) > 0: + bb.warn("Multilib QA Issue: %s package %s - suspicious values '%s' in %s" + % (d.getVar('PN', True), pkg, ' '.join(candidates), var)) + + ml = d.getVar('MLPREFIX', True) + if not ml: + return + + packages = d.getVar('PACKAGES', True) + for pkg in packages.split(): + check_mlprefix(pkg, 'RDEPENDS', ml) + check_mlprefix(pkg, 'RPROVIDES', ml) + check_mlprefix(pkg, 'RRECOMMENDS', ml) + check_mlprefix(pkg, 'RSUGGESTS', ml) + check_mlprefix(pkg, 'RREPLACES', ml) + check_mlprefix(pkg, 'RCONFLICTS', ml) +} diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass new file mode 100644 index 0000000000..8ea2a5a4b8 --- /dev/null +++ b/meta/classes/multilib_global.bbclass @@ -0,0 +1,158 @@ +def preferred_ml_updates(d): + # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set, + # we need to mirror these variables in the multilib case; + multilibs = d.getVar('MULTILIBS', True) or "" + if not multilibs: + return + + prefixes = [] + for ext in multilibs.split(): + eext = ext.split(':') + if len(eext) > 1 and eext[0] == 'multilib': + prefixes.append(eext[1]) + + versions = [] + providers = [] + for v in d.keys(): + if v.startswith("PREFERRED_VERSION_"): + versions.append(v) + if v.startswith("PREFERRED_PROVIDER_"): + providers.append(v) + + for v in versions: + val = d.getVar(v, False) + pkg = v.replace("PREFERRED_VERSION_", "") + if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")): + continue + if '-cross-' in pkg and '${' in pkg: + for p in prefixes: + localdata = bb.data.createCopy(d) + override = ":virtclass-multilib-" + p + localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override) + bb.data.update_data(localdata) + newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-') + if newname != v: + newval = localdata.expand(val) + d.setVar(newname, newval) + # Avoid future variable key expansion + vexp = d.expand(v) + if v != vexp and d.getVar(v, False): + d.renameVar(v, vexp) + continue + for p in prefixes: + newname = "PREFERRED_VERSION_" + p + "-" + pkg + if not d.getVar(newname, 
False): + d.setVar(newname, val) + + for prov in providers: + val = d.getVar(prov, False) + pkg = prov.replace("PREFERRED_PROVIDER_", "") + if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")): + continue + if 'cross-canadian' in pkg: + for p in prefixes: + localdata = bb.data.createCopy(d) + override = ":virtclass-multilib-" + p + localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override) + bb.data.update_data(localdata) + newname = localdata.expand(prov) + if newname != prov: + newval = localdata.expand(val) + d.setVar(newname, newval) + # Avoid future variable key expansion + provexp = d.expand(prov) + if prov != provexp and d.getVar(prov, False): + d.renameVar(prov, provexp) + continue + virt = "" + if pkg.startswith("virtual/"): + pkg = pkg.replace("virtual/", "") + virt = "virtual/" + for p in prefixes: + if pkg != "kernel": + newval = p + "-" + val + + # implement variable keys + localdata = bb.data.createCopy(d) + override = ":virtclass-multilib-" + p + localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override) + bb.data.update_data(localdata) + newname = localdata.expand(prov) + if newname != prov and not d.getVar(newname, False): + d.setVar(newname, localdata.expand(newval)) + + # implement alternative multilib name + newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg) + if not d.getVar(newname, False): + d.setVar(newname, newval) + # Avoid future variable key expansion + provexp = d.expand(prov) + if prov != provexp and d.getVar(prov, False): + d.renameVar(prov, provexp) + + + mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split() + extramp = [] + for p in mp: + if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p: + continue + virt = "" + if p.startswith("virtual/"): + p = p.replace("virtual/", "") + virt = "virtual/" + for pref in prefixes: + extramp.append(virt + pref + "-" + p) + d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp)) + +python multilib_virtclass_handler_vendor () { + if isinstance(e, bb.event.ConfigParsed): + for v in e.data.getVar("MULTILIB_VARIANTS", True).split(): + if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None: + e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v) + preferred_ml_updates(e.data) +} +addhandler multilib_virtclass_handler_vendor +multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed" + +python multilib_virtclass_handler_global () { + if not e.data: + return + + variant = e.data.getVar("BBEXTENDVARIANT", True) + + if isinstance(e, bb.event.RecipeParsed) and not variant: + if bb.data.inherits_class('kernel', e.data) or \ + bb.data.inherits_class('module-base', e.data) or \ + (bb.data.inherits_class('allarch', e.data) and\ + not bb.data.inherits_class('packagegroup', e.data)): + variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split() + + import oe.classextend + clsextends = [] + for variant in variants: + clsextends.append(oe.classextend.ClassExtender(variant, e.data)) + + # Process PROVIDES + origprovs = provs = e.data.getVar("PROVIDES", True) or "" + for clsextend in clsextends: + provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False) + e.data.setVar("PROVIDES", provs) + + # Process RPROVIDES + origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or "" + for clsextend in clsextends: + rprovs = rprovs + " " + 
clsextend.map_variable("RPROVIDES", setvar=False) + e.data.setVar("RPROVIDES", rprovs) + + # Process RPROVIDES_${PN}... + for pkg in (e.data.getVar("PACKAGES", True) or "").split(): + origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or "" + for clsextend in clsextends: + rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False) + rprovs = rprovs + " " + clsextend.extname + "-" + pkg + e.data.setVar("RPROVIDES_%s" % pkg, rprovs) +} + +addhandler multilib_virtclass_handler_global +multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed" + diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass new file mode 100644 index 0000000000..5ee0a2d562 --- /dev/null +++ b/meta/classes/multilib_header.bbclass @@ -0,0 +1,54 @@ +inherit siteinfo + +# If applicable on the architecture, this routine will rename the header and +# add a unique identifier to the name for the ABI/bitsize that is being used. +# A wrapper will be generated for the architecture that knows how to call +# all of the ABI variants for that given architecture. +# +oe_multilib_header() { + + case ${HOST_OS} in + *-musl*) + return + ;; + *) + esac + # We use + # For ARM: We don't support multilib builds. + # For MIPS: "n32" is a special case, which needs to be + # distinct from both 64-bit and 32-bit. + case ${TARGET_ARCH} in + arm*) return + ;; + mips*) case "${MIPSPKGSFX_ABI}" in + "-n32") + ident=n32 + ;; + *) + ident=${SITEINFO_BITS} + ;; + esac + ;; + *) ident=${SITEINFO_BITS} + esac + if echo ${TARGET_ARCH} | grep -q arm; then + return + fi + for each_header in "$@" ; do + if [ ! -f "${D}/${includedir}/$each_header" ]; then + bberror "oe_multilib_header: Unable to find header $each_header." + continue + fi + stem=$(echo $each_header | sed 's#\.h$##') + # if mips64/n32 set ident to n32 + mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h + + sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header + done +} + +# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic. +# We don't need multilib headers for native builds so brute force things. +oe_multilib_header_class-native () { + return +} diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass new file mode 100644 index 0000000000..dcd364b92c --- /dev/null +++ b/meta/classes/native.bbclass @@ -0,0 +1,175 @@ +# We want native packages to be relocatable +inherit relocatable + +# Native packages are built indirectly via dependency, +# no need for them to be a direct target of 'world' +EXCLUDE_FROM_WORLD = "1" + +PACKAGES = "" +PACKAGES_class-native = "" +PACKAGES_DYNAMIC = "" +PACKAGES_DYNAMIC_class-native = "" +PACKAGE_ARCH = "${BUILD_ARCH}" + +# used by cmake class +OECMAKE_RPATH = "${libdir}" +OECMAKE_RPATH_class-native = "${libdir}" + +# When this class has packaging enabled, setting +# RPROVIDES becomes unnecessary. 
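+# For now packaging is disabled for native recipes (see the deltask calls at the end of +# this class), so the recipe name itself is advertised as the runtime provide: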
+RPROVIDES = "${PN}" + +TARGET_ARCH = "${BUILD_ARCH}" +TARGET_OS = "${BUILD_OS}" +TARGET_VENDOR = "${BUILD_VENDOR}" +TARGET_PREFIX = "${BUILD_PREFIX}" +TARGET_CC_ARCH = "${BUILD_CC_ARCH}" +TARGET_LD_ARCH = "${BUILD_LD_ARCH}" +TARGET_AS_ARCH = "${BUILD_AS_ARCH}" +TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}" +TARGET_CFLAGS = "${BUILD_CFLAGS}" +TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}" +TARGET_LDFLAGS = "${BUILD_LDFLAGS}" +TARGET_FPU = "" + +HOST_ARCH = "${BUILD_ARCH}" +HOST_OS = "${BUILD_OS}" +HOST_VENDOR = "${BUILD_VENDOR}" +HOST_PREFIX = "${BUILD_PREFIX}" +HOST_CC_ARCH = "${BUILD_CC_ARCH}" +HOST_LD_ARCH = "${BUILD_LD_ARCH}" +HOST_AS_ARCH = "${BUILD_AS_ARCH}" + +CPPFLAGS = "${BUILD_CPPFLAGS}" +CFLAGS = "${BUILD_CFLAGS}" +CXXFLAGS = "${BUILD_CFLAGS}" +LDFLAGS = "${BUILD_LDFLAGS}" +LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} " + +STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}" +STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}" + +# native pkg doesn't need the TOOLCHAIN_OPTIONS. +TOOLCHAIN_OPTIONS = "" + +DEPENDS_GETTEXT = "gettext-native" + +# Don't build ptest natively +PTEST_ENABLED = "0" + +# Don't use site files for native builds +export CONFIG_SITE = "${COREBASE}/meta/site/native" + +# set the compiler as well. It could have been set to something else +export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}" +export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}" +export FC = "${CCACHE}${HOST_PREFIX}gfortran ${HOST_CC_ARCH}" +export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E" +export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} " +export CCLD = "${CC}" +export AR = "${HOST_PREFIX}ar" +export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}" +export RANLIB = "${HOST_PREFIX}ranlib" +export STRIP = "${HOST_PREFIX}strip" + +# Path prefixes +base_prefix = "${STAGING_DIR_NATIVE}" +prefix = "${STAGING_DIR_NATIVE}${prefix_native}" +exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}" + +bindir = "${STAGING_BINDIR_NATIVE}" +sbindir = "${STAGING_SBINDIR_NATIVE}" +libdir = "${STAGING_LIBDIR_NATIVE}" +includedir = "${STAGING_INCDIR_NATIVE}" +sysconfdir = "${STAGING_ETCDIR_NATIVE}" +datadir = "${STAGING_DATADIR_NATIVE}" + +baselib = "lib" + +# Libtool's default paths are correct for the native machine +lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1" + +NATIVE_PACKAGE_PATH_SUFFIX ?= "" +bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" +libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" +libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}" + +do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/" +do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/" + +# Since we actually install these into situ there is no staging prefix +STAGING_DIR_HOST = "" +STAGING_DIR_TARGET = "" +PKG_CONFIG_DIR = "${libdir}/pkgconfig" + +EXTRA_NATIVE_PKGCONFIG_PATH ?= "" +PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}" +PKG_CONFIG_SYSROOT_DIR = "" + +# we dont want libc-uclibc or libc-glibc to kick in for native recipes +LIBCOVERRIDE = "" +CLASSOVERRIDE = "class-native" +MACHINEOVERRIDES = "" + +PATH_prepend = "${COREBASE}/scripts/native-intercept:" + +python native_virtclass_handler () { + classextend = e.data.getVar('BBCLASSEXTEND', True) or "" + if "native" not in classextend: + return + + pn = e.data.getVar("PN", True) + if not pn.endswith("-native"): + return + + def map_dependencies(varname, d, suffix = ""): + if suffix: + varname = varname + "_" + suffix + deps = d.getVar(varname, True) + if not deps: + return + deps = bb.utils.explode_deps(deps) + newdeps = [] + for dep in deps: + if "-cross-" in dep: + 
newdeps.append(dep.replace("-cross", "-native")) + elif not dep.endswith("-native"): + newdeps.append(dep + "-native") + else: + newdeps.append(dep) + d.setVar(varname, " ".join(newdeps)) + + map_dependencies("DEPENDS", e.data) + for pkg in [e.data.getVar("PN", True), "", "${PN}"]: + map_dependencies("RDEPENDS", e.data, pkg) + map_dependencies("RRECOMMENDS", e.data, pkg) + map_dependencies("RSUGGESTS", e.data, pkg) + map_dependencies("RPROVIDES", e.data, pkg) + map_dependencies("RREPLACES", e.data, pkg) + + provides = e.data.getVar("PROVIDES", True) + for prov in provides.split(): + if prov.find(pn) != -1: + continue + if not prov.endswith("-native"): + provides = provides.replace(prov, prov + "-native") + e.data.setVar("PROVIDES", provides) + + e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native") +} + +addhandler native_virtclass_handler +native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise" + +deltask package +deltask packagedata +deltask package_qa +deltask package_write_ipk +deltask package_write_deb +deltask package_write_rpm +deltask package_write + +do_packagedata[stamp-extra-info] = "" +do_populate_sysroot[stamp-extra-info] = "" + +USE_NLS = "no" diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass new file mode 100644 index 0000000000..5e78116ab8 --- /dev/null +++ b/meta/classes/nativesdk.bbclass @@ -0,0 +1,95 @@ +# SDK packages are built either explicitly by the user, +# or indirectly via dependency. No need to be in 'world'. +EXCLUDE_FROM_WORLD = "1" + +STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}" + +# libc for the SDK can be different to that of the target +NATIVESDKLIBC ?= "libc-glibc" +LIBCOVERRIDE = ":${NATIVESDKLIBC}" +CLASSOVERRIDE = "class-nativesdk" +MACHINEOVERRIDES = "" + +# +# Update PACKAGE_ARCH and PACKAGE_ARCHS +# +PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}" +PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}" + +# +# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit +# binaries +# +DEPENDS_append = " chrpath-replacement-native" +EXTRANATIVEPATH += "chrpath-native" + +STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}" +STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}" + +HOST_ARCH = "${SDK_ARCH}" +HOST_VENDOR = "${SDK_VENDOR}" +HOST_OS = "${SDK_OS}" +HOST_PREFIX = "${SDK_PREFIX}" +HOST_CC_ARCH = "${SDK_CC_ARCH}" +HOST_LD_ARCH = "${SDK_LD_ARCH}" +HOST_AS_ARCH = "${SDK_AS_ARCH}" +#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}" + +TARGET_ARCH = "${SDK_ARCH}" +TARGET_VENDOR = "${SDK_VENDOR}" +TARGET_OS = "${SDK_OS}" +TARGET_PREFIX = "${SDK_PREFIX}" +TARGET_CC_ARCH = "${SDK_CC_ARCH}" +TARGET_LD_ARCH = "${SDK_LD_ARCH}" +TARGET_AS_ARCH = "${SDK_AS_ARCH}" +TARGET_FPU = "" +EXTRA_OECONF_GCC_FLOAT = "" + +CPPFLAGS = "${BUILDSDK_CPPFLAGS}" +CFLAGS = "${BUILDSDK_CFLAGS}" +CXXFLAGS = "${BUILDSDK_CFLAGS}" +LDFLAGS = "${BUILDSDK_LDFLAGS}" + +# Change to place files in SDKPATH +base_prefix = "${SDKPATHNATIVE}" +prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" +exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}" +baselib = "lib" + +export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig" +export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}" + +python nativesdk_virtclass_handler () { + pn = e.data.getVar("PN", True) + if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")): + return + + e.data.setVar("MLPREFIX", "nativesdk-") + e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", 
"").replace("nativesdk-", "")) + e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk") +} + +python () { + pn = d.getVar("PN", True) + if not pn.startswith("nativesdk-"): + return + + import oe.classextend + + clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d) + clsextend.rename_packages() + clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split()) + + clsextend.map_depends_variable("DEPENDS") + clsextend.map_packagevars() + clsextend.map_variable("PROVIDES") + clsextend.map_regexp_variable("PACKAGES_DYNAMIC") +} + +addhandler nativesdk_virtclass_handler +nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise" + +do_populate_sysroot[stamp-extra-info] = "" +do_packagedata[stamp-extra-info] = "" + +USE_NLS = "${SDKUSE_NLS}" diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass new file mode 100644 index 0000000000..d00f468d9a --- /dev/null +++ b/meta/classes/oelint.bbclass @@ -0,0 +1,85 @@ +addtask lint before do_fetch +do_lint[nostamp] = "1" +python do_lint() { + pkgname = d.getVar("PN", True) + + ############################## + # Test that DESCRIPTION exists + # + description = d.getVar("DESCRIPTION") + if description[1:10] == '{SUMMARY}': + bb.warn("%s: DESCRIPTION is not set" % pkgname) + + + ############################## + # Test that HOMEPAGE exists + # + homepage = d.getVar("HOMEPAGE") + if homepage == '': + bb.warn("%s: HOMEPAGE is not set" % pkgname) + elif not homepage.startswith("http://") and not homepage.startswith("https://"): + bb.warn("%s: HOMEPAGE doesn't start with http:// or https://" % pkgname) + + + ############################## + # Test for valid SECTION + # + section = d.getVar("SECTION") + if section == '': + bb.warn("%s: SECTION is not set" % pkgname) + elif not section.islower(): + bb.warn("%s: SECTION should only use lower case" % pkgname) + + + ############################## + # Check that all patches have Signed-off-by and Upstream-Status + # + srcuri = d.getVar("SRC_URI").split() + fpaths = (d.getVar('FILESPATH', True) or '').split(':') + + def findPatch(patchname): + for dir in fpaths: + patchpath = dir + patchname + if os.path.exists(patchpath): + return patchpath + + def findKey(path, key): + ret = True + f = file('%s' % path, mode = 'r') + line = f.readline() + while line: + if line.find(key) != -1: + ret = False + line = f.readline() + f.close() + return ret + + length = len("file://") + for item in srcuri: + if item.startswith("file://"): + item = item[length:] + if item.endswith(".patch") or item.endswith(".diff"): + path = findPatch(item) + if findKey(path, "Signed-off-by"): + bb.warn("%s: %s doesn't have Signed-off-by" % (pkgname, item)) + if findKey(path, "Upstream-Status"): + bb.warn("%s: %s doesn't have Upstream-Status" % (pkgname, item)) + + + ############################## + # Check for ${PN} or ${P} usage in SRC_URI or S + # Should use ${BPN} or ${BP} instead to avoid breaking multilib + # + for s in srcuri: + if not s.startswith("file://"): + if not s.find("{PN}") == -1: + bb.warn("%s: should use BPN instead of PN in SRC_URI" % pkgname) + if not s.find("{P}") == -1: + bb.warn("%s: should use BP instead of P in SRC_URI" % pkgname) + + srcpath = d.getVar("S") + if not srcpath.find("{PN}") == -1: + bb.warn("%s: should use BPN instead of PN in S" % pkgname) + if not srcpath.find("{P}") == -1: + bb.warn("%s: should use BP instead of P in S" % pkgname) +} diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass new file mode 
100644 index 0000000000..77bf0c1c14 --- /dev/null +++ b/meta/classes/own-mirrors.bbclass @@ -0,0 +1,13 @@ +PREMIRRORS() { +cvs://.*/.* ${SOURCE_MIRROR_URL} +svn://.*/.* ${SOURCE_MIRROR_URL} +git://.*/.* ${SOURCE_MIRROR_URL} +gitsm://.*/.* ${SOURCE_MIRROR_URL} +hg://.*/.* ${SOURCE_MIRROR_URL} +bzr://.*/.* ${SOURCE_MIRROR_URL} +svk://.*/.* ${SOURCE_MIRROR_URL} +p4://.*/.* ${SOURCE_MIRROR_URL} +osc://.*/.* ${SOURCE_MIRROR_URL} +https?$://.*/.* ${SOURCE_MIRROR_URL} +ftp://.*/.* ${SOURCE_MIRROR_URL} +} diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass new file mode 100644 index 0000000000..b81f4f9281 --- /dev/null +++ b/meta/classes/package.bbclass @@ -0,0 +1,2060 @@ +# +# Packaging process +# +# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS, +# taking D and splitting it up into the packages listed in PACKAGES, placing the +# resulting output in PKGDEST. +# +# There are the following default steps, but PACKAGEFUNCS can be extended: +# +# a) package_get_auto_pr - get PRAUTO from remote PR service +# +# b) perform_packagecopy - Copy D into PKGD +# +# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES +# +# d) split_and_strip_files - split the files into runtime and debug and strip them. +# Debug files include debug info split, and associated sources that end up in -dbg packages +# +# e) fixup_perms - Fix up permissions in the package before we split it. +# +# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/ +# Also triggers the binary stripping code to put files in -dbg packages. +# +# g) package_do_filedeps - Collect per-file run-time dependency metadata +# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with +# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg +# +# h) package_do_shlibs - Look at the shared libraries generated and automatically add any +# dependencies found. Also stores the package name so anyone else using this library +# knows which package to depend on. +# +# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files +# +# j) read_shlibdeps - Reads the stored shlibs information into the metadata +# +# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages +# +# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later +# packaging steps + +inherit packagedata +inherit prserv +inherit chrpath + +# Need the package_qa_handle_error() in insane.bbclass +inherit insane + +PKGD = "${WORKDIR}/package" +PKGDEST = "${WORKDIR}/packages-split" + +LOCALE_SECTION ?= '' + +ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}" + +# rpm is used for the per-file dependency identification +PACKAGE_DEPENDS += "rpm-native" + +def legitimize_package_name(s): + """ + Make sure package names are legitimate strings + """ + import re + + def fixutf(m): + cp = m.group(1) + if cp: + return ('\u%s' % cp).decode('unicode_escape').encode('utf-8') + + # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
+ s = re.sub('', fixutf, s) + + # Remaining package name validity fixes + return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-') + +def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None): + """ + Used in .bb files to split up dynamically generated subpackages of a + given package, usually plugins or modules. + + Arguments: + root -- the path in which to search + file_regex -- regular expression to match searched files. Use + parentheses () to mark the part of this expression + that should be used to derive the module name (to be + substituted where %s is used in other function + arguments as noted below) + output_pattern -- pattern to use for the package names. Must include %s. + description -- description to set for each package. Must include %s. + postinst -- postinstall script to use for all packages (as a + string) + recursive -- True to perform a recursive search - default False + hook -- a hook function to be called for every match. The + function will be called with the following arguments + (in the order listed): + f: full path to the file/directory match + pkg: the package name + file_regex: as above + output_pattern: as above + modulename: the module name derived using file_regex + extra_depends -- extra runtime dependencies (RDEPENDS) to be set for + all packages. The default value of None causes a + dependency on the main package (${PN}) - if you do + not want this, pass '' for this parameter. + aux_files_pattern -- extra item(s) to be added to FILES for each + package. Can be a single string item or a list of + strings for multiple items. Must include %s. + postrm -- postrm script to use for all packages (as a string) + allow_dirs -- True allow directories to be matched - default False + prepend -- if True, prepend created packages to PACKAGES instead + of the default False which appends them + match_path -- match file_regex on the whole relative path to the + root rather than just the file name + aux_files_pattern_verbatim -- extra item(s) to be added to FILES for + each package, using the actual derived module name + rather than converting it to something legal for a + package name. Can be a single string item or a list + of strings for multiple items. Must include %s. + allow_links -- True to allow symlinks to be matched - default False + summary -- Summary to set for each package. Must include %s; + defaults to description if not set. + + """ + + dvar = d.getVar('PKGD', True) + + # If the root directory doesn't exist, don't error out later but silently do + # no splitting. 
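+ # Typical usage from a recipe's populate_packages_prepend, shown here for illustration + # (the plugin directory and package names are made up): + # do_split_packages(d, d.expand('${libdir}/foo/plugins'), '^lib(.*)\.so$', + # 'foo-plugin-%s', 'Foo plugin for %s', extra_depends='')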
+ if not os.path.exists(dvar + root): + return [] + + ml = d.getVar("MLPREFIX", True) + if ml: + if not output_pattern.startswith(ml): + output_pattern = ml + output_pattern + + newdeps = [] + for dep in (extra_depends or "").split(): + if dep.startswith(ml): + newdeps.append(dep) + else: + newdeps.append(ml + dep) + if newdeps: + extra_depends = " ".join(newdeps) + + + packages = d.getVar('PACKAGES', True).split() + split_packages = [] + + if postinst: + postinst = '#!/bin/sh\n' + postinst + '\n' + if postrm: + postrm = '#!/bin/sh\n' + postrm + '\n' + if not recursive: + objs = os.listdir(dvar + root) + else: + objs = [] + for walkroot, dirs, files in os.walk(dvar + root): + for file in files: + relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1) + if relpath: + objs.append(relpath) + + if extra_depends == None: + extra_depends = d.getVar("PN", True) + + if not summary: + summary = description + + for o in sorted(objs): + import re, stat + if match_path: + m = re.match(file_regex, o) + else: + m = re.match(file_regex, os.path.basename(o)) + + if not m: + continue + f = os.path.join(dvar + root, o) + mode = os.lstat(f).st_mode + if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))): + continue + on = legitimize_package_name(m.group(1)) + pkg = output_pattern % on + split_packages.append(pkg) + if not pkg in packages: + if prepend: + packages = [pkg] + packages + else: + packages.append(pkg) + oldfiles = d.getVar('FILES_' + pkg, True) + newfile = os.path.join(root, o) + # These names will be passed through glob() so if the filename actually + # contains * or ? (rare, but possible) we need to handle that specially + newfile = newfile.replace('*', '[*]') + newfile = newfile.replace('?', '[?]') + if not oldfiles: + the_files = [newfile] + if aux_files_pattern: + if type(aux_files_pattern) is list: + for fp in aux_files_pattern: + the_files.append(fp % on) + else: + the_files.append(aux_files_pattern % on) + if aux_files_pattern_verbatim: + if type(aux_files_pattern_verbatim) is list: + for fp in aux_files_pattern_verbatim: + the_files.append(fp % m.group(1)) + else: + the_files.append(aux_files_pattern_verbatim % m.group(1)) + d.setVar('FILES_' + pkg, " ".join(the_files)) + else: + d.setVar('FILES_' + pkg, oldfiles + " " + newfile) + if extra_depends != '': + d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends) + if not d.getVar('DESCRIPTION_' + pkg, True): + d.setVar('DESCRIPTION_' + pkg, description % on) + if not d.getVar('SUMMARY_' + pkg, True): + d.setVar('SUMMARY_' + pkg, summary % on) + if postinst: + d.setVar('pkg_postinst_' + pkg, postinst) + if postrm: + d.setVar('pkg_postrm_' + pkg, postrm) + if callable(hook): + hook(f, pkg, file_regex, output_pattern, m.group(1)) + + d.setVar('PACKAGES', ' '.join(packages)) + return split_packages + +PACKAGE_DEPENDS += "file-native" + +python () { + if d.getVar('PACKAGES', True) != '': + deps = "" + for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split(): + deps += " %s:do_populate_sysroot" % dep + d.appendVarFlag('do_package', 'depends', deps) + + # shlibs requires any DEPENDS to have already packaged for the *.list files + d.appendVarFlag('do_package', 'deptask', " do_packagedata") +} + +def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d): + # Function to split a single file into two components, one is the stripped + # target system binary, the other contains any debugging information. The + # two files are linked to reference each other. 
+ # + # sourcefile is also generated containing a list of debugsources + + import stat + + dvar = d.getVar('PKGD', True) + objcopy = d.getVar("OBJCOPY", True) + debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit") + workdir = d.getVar("WORKDIR", True) + workparentdir = d.getVar("DEBUGSRC_OVERRIDE_PATH", True) or os.path.dirname(os.path.dirname(workdir)) + + # We ignore kernel modules, we don't generate debug info files. + if file.find("/lib/modules/") != -1 and file.endswith(".ko"): + return 1 + + newmode = None + if not os.access(file, os.W_OK) or os.access(file, os.R_OK): + origmode = os.stat(file)[stat.ST_MODE] + newmode = origmode | stat.S_IWRITE | stat.S_IREAD + os.chmod(file, newmode) + + # We need to extract the debug src information here... + if debugsrcdir: + cmd = "'%s' -b '%s' -d '%s' -i -l '%s' '%s'" % (debugedit, workparentdir, debugsrcdir, sourcefile, file) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + + bb.utils.mkdirhier(os.path.dirname(debugfile)) + + cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + + # Set the debuglink to have the view of the file path on the target + cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + + if newmode: + os.chmod(file, origmode) + + return 0 + +def copydebugsources(debugsrcdir, d): + # The debug src information written out to sourcefile is further procecessed + # and copied to the destination here. + + import stat + + sourcefile = d.expand("${WORKDIR}/debugsources.list") + if debugsrcdir and os.path.isfile(sourcefile): + dvar = d.getVar('PKGD', True) + strip = d.getVar("STRIP", True) + objcopy = d.getVar("OBJCOPY", True) + debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit") + workdir = d.getVar("WORKDIR", True) + workparentdir = os.path.dirname(os.path.dirname(workdir)) + workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir) + + nosuchdir = [] + basepath = dvar + for p in debugsrcdir.split("/"): + basepath = basepath + "/" + p + if not cpath.exists(basepath): + nosuchdir.append(basepath) + bb.utils.mkdirhier(basepath) + cpath.updatecache(basepath) + + processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(|)$' | " + # We need to ignore files that are not actually ours + # we do this by only paying attention to items from this package + processdebugsrc += "fgrep -zw '%s' | " + processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)" + + cmd = processdebugsrc % (sourcefile, workbasedir, workparentdir, dvar, debugsrcdir) + (retval, output) = oe.utils.getstatusoutput(cmd) + # Can "fail" if internal headers/transient sources are attempted + #if retval: + # bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd)) + + # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced. + # Work around this by manually finding and copying any symbolic links that made it through. 
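+ # i.e. every symlink that was copied verbatim is printed (NUL-terminated) and deleted, + # and the surviving relative paths are pushed back through cpio with -L so the real + # files are copied into their place.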
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd)) + + # The copy by cpio may have resulted in some empty directories! Remove these + cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + + # Also remove debugsrcdir if its empty + for p in nosuchdir[::-1]: + if os.path.exists(p) and not os.listdir(p): + os.rmdir(p) + +# +# Package data handling routines +# + +def get_package_mapping (pkg, basepkg, d): + import oe.packagedata + + data = oe.packagedata.read_subpkgdata(pkg, d) + key = "PKG_%s" % pkg + + if key in data: + # Have to avoid undoing the write_extra_pkgs(global_variants...) + if bb.data.inherits_class('allarch', d) and data[key] == basepkg: + return pkg + return data[key] + + return pkg + +def get_package_additional_metadata (pkg_type, d): + base_key = "PACKAGE_ADD_METADATA" + for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key): + if d.getVar(key) is None: + continue + d.setVarFlag(key, "type", "list") + if d.getVarFlag(key, "separator") is None: + d.setVarFlag(key, "separator", "\\n") + metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)] + return "\n".join(metadata_fields).strip() + +def runtime_mapping_rename (varname, pkg, d): + #bb.note("%s before: %s" % (varname, d.getVar(varname, True))) + + if bb.data.inherits_class('packagegroup', d): + return + + new_depends = {} + deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "") + for depend in deps: + new_depend = get_package_mapping(depend, pkg, d) + new_depends[new_depend] = deps[depend] + + d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False)) + + #bb.note("%s after: %s" % (varname, d.getVar(varname, True))) + +# +# Package functions suitable for inclusion in PACKAGEFUNCS +# + +python package_get_auto_pr() { + import oe.prservice + import re + + # Support per recipe PRSERV_HOST + pn = d.getVar('PN', True) + host = d.getVar("PRSERV_HOST_" + pn, True) + if not (host is None): + d.setVar("PRSERV_HOST", host) + + pkgv = d.getVar("PKGV", True) + + # PR Server not active, handle AUTOINC + if not d.getVar('PRSERV_HOST', True): + if 'AUTOINC' in pkgv: + d.setVar("PKGV", pkgv.replace("AUTOINC", "0")) + return + + auto_pr = None + pv = d.getVar("PV", True) + version = d.getVar("PRAUTOINX", True) + pkgarch = d.getVar("PACKAGE_ARCH", True) + checksum = d.getVar("BB_TASKHASH", True) + + if d.getVar('PRSERV_LOCKDOWN', True): + auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None + if auto_pr is None: + bb.fatal("Can NOT get PRAUTO from lockdown exported file") + d.setVar('PRAUTO',str(auto_pr)) + return + + try: + conn = d.getVar("__PRSERV_CONN", True) + if conn is None: + conn = oe.prservice.prserv_make_conn(d) + if conn is not None: + if "AUTOINC" in pkgv: + srcpv = bb.fetch2.get_srcrev(d) + base_ver = "AUTOINC-%s" % version[:version.find(srcpv)] + value = conn.getPR(base_ver, pkgarch, srcpv) + d.setVar("PKGV", pkgv.replace("AUTOINC", str(value))) + + auto_pr = conn.getPR(version, pkgarch, checksum) + except Exception as e: + 
bb.fatal("Can NOT get PRAUTO, exception %s" % str(e)) + if auto_pr is None: + bb.fatal("Can NOT get PRAUTO from remote PR service") + d.setVar('PRAUTO',str(auto_pr)) +} + +LOCALEBASEPN ??= "${PN}" + +python package_do_split_locales() { + if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'): + bb.debug(1, "package requested not splitting locales") + return + + packages = (d.getVar('PACKAGES', True) or "").split() + + datadir = d.getVar('datadir', True) + if not datadir: + bb.note("datadir not defined") + return + + dvar = d.getVar('PKGD', True) + pn = d.getVar('LOCALEBASEPN', True) + + if pn + '-locale' in packages: + packages.remove(pn + '-locale') + + localedir = os.path.join(dvar + datadir, 'locale') + + if not cpath.isdir(localedir): + bb.debug(1, "No locale files in this package") + return + + locales = os.listdir(localedir) + + summary = d.getVar('SUMMARY', True) or pn + description = d.getVar('DESCRIPTION', True) or "" + locale_section = d.getVar('LOCALE_SECTION', True) + mlprefix = d.getVar('MLPREFIX', True) or "" + for l in sorted(locales): + ln = legitimize_package_name(l) + pkg = pn + '-locale-' + ln + packages.append(pkg) + d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l)) + d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln)) + d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln)) + d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l)) + d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l)) + if locale_section: + d.setVar('SECTION_' + pkg, locale_section) + + d.setVar('PACKAGES', ' '.join(packages)) + + # Disabled by RP 18/06/07 + # Wildcards aren't supported in debian + # They break with ipkg since glibc-locale* will mean that + # glibc-localedata-translit* won't install as a dependency + # for some other package which breaks meta-toolchain + # Probably breaks since virtual-locale- isn't provided anywhere + #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split() + #rdep.append('%s-locale*' % pn) + #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep)) +} + +python perform_packagecopy () { + dest = d.getVar('D', True) + dvar = d.getVar('PKGD', True) + + # Start by package population by taking a copy of the installed + # files to operate on + # Preserve sparse files and hard links + cmd = 'tar -cf - -C %s -p . 
| tar -xf - -C %s' % (dest, dvar) + (retval, output) = oe.utils.getstatusoutput(cmd) + if retval: + bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")) + + # replace RPATHs for the nativesdk binaries, to make them relocatable + if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d): + rpath_replace (dvar, d) +} +perform_packagecopy[cleandirs] = "${PKGD}" +perform_packagecopy[dirs] = "${PKGD}" + +# We generate a master list of directories to process, we start by +# seeding this list with reasonable defaults, then load from +# the fs-perms.txt files +python fixup_perms () { + import pwd, grp + + # init using a string with the same format as a line as documented in + # the fs-perms.txt file + # + # link + # + # __str__ can be used to print out an entry in the input format + # + # if fs_perms_entry.path is None: + # an error occured + # if fs_perms_entry.link, you can retrieve: + # fs_perms_entry.path = path + # fs_perms_entry.link = target of link + # if not fs_perms_entry.link, you can retrieve: + # fs_perms_entry.path = path + # fs_perms_entry.mode = expected dir mode or None + # fs_perms_entry.uid = expected uid or -1 + # fs_perms_entry.gid = expected gid or -1 + # fs_perms_entry.walk = 'true' or something else + # fs_perms_entry.fmode = expected file mode or None + # fs_perms_entry.fuid = expected file uid or -1 + # fs_perms_entry_fgid = expected file gid or -1 + class fs_perms_entry(): + def __init__(self, line): + lsplit = line.split() + if len(lsplit) == 3 and lsplit[1].lower() == "link": + self._setlink(lsplit[0], lsplit[2]) + elif len(lsplit) == 8: + self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7]) + else: + msg = "Fixup Perms: invalid config line %s" % line + package_qa_handle_error("perm-config", msg, d) + self.path = None + self.link = None + + def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid): + self.path = os.path.normpath(path) + self.link = None + self.mode = self._procmode(mode) + self.uid = self._procuid(uid) + self.gid = self._procgid(gid) + self.walk = walk.lower() + self.fmode = self._procmode(fmode) + self.fuid = self._procuid(fuid) + self.fgid = self._procgid(fgid) + + def _setlink(self, path, link): + self.path = os.path.normpath(path) + self.link = link + + def _procmode(self, mode): + if not mode or (mode and mode == "-"): + return None + else: + return int(mode,8) + + # Note uid/gid -1 has special significance in os.lchown + def _procuid(self, uid): + if uid is None or uid == "-": + return -1 + elif uid.isdigit(): + return int(uid) + else: + return pwd.getpwnam(uid).pw_uid + + def _procgid(self, gid): + if gid is None or gid == "-": + return -1 + elif gid.isdigit(): + return int(gid) + else: + return grp.getgrnam(gid).gr_gid + + # Use for debugging the entries + def __str__(self): + if self.link: + return "%s link %s" % (self.path, self.link) + else: + mode = "-" + if self.mode: + mode = "0%o" % self.mode + fmode = "-" + if self.fmode: + fmode = "0%o" % self.fmode + uid = self._mapugid(self.uid) + gid = self._mapugid(self.gid) + fuid = self._mapugid(self.fuid) + fgid = self._mapugid(self.fgid) + return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid) + + def _mapugid(self, id): + if id is None or id == -1: + return "-" + else: + return "%d" % id + + # Fix the permission, owner and group of path + def fix_perms(path, mode, uid, gid, dir): + if mode and not 
os.path.islink(path): + #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir)) + os.chmod(path, mode) + # -1 is a special value that means don't change the uid/gid + # if they are BOTH -1, don't bother to lchown + if not (uid == -1 and gid == -1): + #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir)) + os.lchown(path, uid, gid) + + # Return a list of configuration files based on either the default + # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES + # paths are resolved via BBPATH + def get_fs_perms_list(d): + str = "" + bbpath = d.getVar('BBPATH', True) + fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True) + if not fs_perms_tables: + fs_perms_tables = 'files/fs-perms.txt' + for conf_file in fs_perms_tables.split(): + str += " %s" % bb.utils.which(bbpath, conf_file) + return str + + + + dvar = d.getVar('PKGD', True) + + fs_perms_table = {} + + # By default all of the standard directories specified in + # bitbake.conf will get 0755 root:root. + target_path_vars = [ 'base_prefix', + 'prefix', + 'exec_prefix', + 'base_bindir', + 'base_sbindir', + 'base_libdir', + 'datadir', + 'sysconfdir', + 'servicedir', + 'sharedstatedir', + 'localstatedir', + 'infodir', + 'mandir', + 'docdir', + 'bindir', + 'sbindir', + 'libexecdir', + 'libdir', + 'includedir', + 'oldincludedir' ] + + for path in target_path_vars: + dir = d.getVar(path, True) or "" + if dir == "": + continue + fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d)) + + # Now we actually load from the configuration files + for conf in get_fs_perms_list(d).split(): + if os.path.exists(conf): + f = open(conf) + for line in f: + if line.startswith('#'): + continue + lsplit = line.split() + if len(lsplit) == 0: + continue + if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"): + msg = "Fixup perms: %s invalid line: %s" % (conf, line) + package_qa_handle_error("perm-line", msg, d) + continue + entry = fs_perms_entry(d.expand(line)) + if entry and entry.path: + fs_perms_table[entry.path] = entry + f.close() + + # Debug -- list out in-memory table + #for dir in fs_perms_table: + # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir]))) + + # We process links first, so we can go back and fixup directory ownership + # for any newly created directories + for dir in fs_perms_table: + if not fs_perms_table[dir].link: + continue + + origin = dvar + dir + if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)): + continue + + link = fs_perms_table[dir].link + if link[0] == "/": + target = dvar + link + ptarget = link + else: + target = os.path.join(os.path.dirname(origin), link) + ptarget = os.path.join(os.path.dirname(dir), link) + if os.path.exists(target): + msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget) + package_qa_handle_error("perm-link", msg, d) + continue + + # Create path to move directory to, move it, and then setup the symlink + bb.utils.mkdirhier(os.path.dirname(target)) + #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget)) + os.rename(origin, target) + #bb.note("Fixup Perms: Link %s -> %s" % (dir, link)) + os.symlink(link, origin) + + for dir in fs_perms_table: + if fs_perms_table[dir].link: + continue + + origin = dvar + dir + if not (cpath.exists(origin) and cpath.isdir(origin)): + continue + + fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir) + + if fs_perms_table[dir].walk == 'true': + for root, 
dirs, files in os.walk(origin): + for dr in dirs: + each_dir = os.path.join(root, dr) + fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir) + for f in files: + each_file = os.path.join(root, f) + fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir) +} + +python split_and_strip_files () { + import stat, errno + + dvar = d.getVar('PKGD', True) + pn = d.getVar('PN', True) + + # We default to '.debug' style + if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory': + # Single debug-file-directory style debug info + debugappend = ".debug" + debugdir = "" + debuglibdir = "/usr/lib/debug" + debugsrcdir = "/usr/src/debug" + elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src': + # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug + debugappend = "" + debugdir = "/.debug" + debuglibdir = "" + debugsrcdir = "" + else: + # Original OE-core, a.k.a. ".debug", style debug info + debugappend = "" + debugdir = "/.debug" + debuglibdir = "" + debugsrcdir = "/usr/src/debug" + + sourcefile = d.expand("${WORKDIR}/debugsources.list") + bb.utils.remove(sourcefile) + + os.chdir(dvar) + + # Return type (bits): + # 0 - not elf + # 1 - ELF + # 2 - stripped + # 4 - executable + # 8 - shared library + # 16 - kernel module + def isELF(path): + type = 0 + ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\"")) + + if ret: + msg = "split_and_strip_files: 'file %s' failed" % path + package_qa_handle_error("split-strip", msg, d) + return type + + # Not stripped + if "ELF" in result: + type |= 1 + if "not stripped" not in result: + type |= 2 + if "executable" in result: + type |= 4 + if "shared" in result: + type |= 8 + return type + + + # + # First lets figure out all of the files we may have to process ... do this only once! + # + elffiles = {} + symlinks = {} + kernmods = [] + inodes = {} + libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True)) + baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True)) + if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'): + for root, dirs, files in cpath.walk(dvar): + for f in files: + file = os.path.join(root, f) + if file.endswith(".ko") and file.find("/lib/modules/") != -1: + kernmods.append(file) + continue + + # Skip debug files + if debugappend and file.endswith(debugappend): + continue + if debugdir and debugdir in os.path.dirname(file[len(dvar):]): + continue + + try: + ltarget = cpath.realpath(file, dvar, False) + s = cpath.lstat(ltarget) + except OSError as e: + (err, strerror) = e.args + if err != errno.ENOENT: + raise + # Skip broken symlinks + continue + if not s: + continue + # Check its an excutable + if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \ + or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f): + # If it's a symlink, and points to an ELF file, we capture the readlink target + if cpath.islink(file): + target = os.readlink(file) + if isELF(ltarget): + #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget))) + symlinks[file] = target + continue + + # It's a file (or hardlink), not a link + # ...but is it ELF, and is it already stripped? 
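# For orientation, an illustrative reading of the isELF() bitmask defined
# above (the sample strings are hypothetical file(1) descriptions, not
# output captured from this class):
#   "ELF 64-bit LSB shared object, x86-64, ..., not stripped"
#       -> 1 | 8 = 9      (ELF, shared library, not yet stripped)
#   "ELF 64-bit LSB executable, x86-64, ..., stripped"
#       -> 1 | 2 | 4 = 7  (ELF, already stripped, executable)
# Anything whose description lacks "ELF" stays 0 and is ignored below.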
+ elf_file = isELF(file) + if elf_file & 1: + if elf_file & 2: + if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split(): + bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn)) + else: + msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn) + package_qa_handle_error("already-stripped", msg, d) + continue + + # At this point we have an unstripped elf file. We need to: + # a) Make sure any file we strip is not hardlinked to anything else outside this tree + # b) Only strip any hardlinked file once (no races) + # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks + + # Use a reference of device ID and inode number to indentify files + file_reference = "%d_%d" % (s.st_dev, s.st_ino) + if file_reference in inodes: + os.unlink(file) + os.link(inodes[file_reference][0], file) + inodes[file_reference].append(file) + else: + inodes[file_reference] = [file] + # break hardlink + bb.utils.copyfile(file, file) + elffiles[file] = elf_file + # Modified the file so clear the cache + cpath.updatecache(file) + + # + # First lets process debug splitting + # + if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'): + for file in elffiles: + src = file[len(dvar):] + dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend + fpath = dvar + dest + + # Split the file... + bb.utils.mkdirhier(os.path.dirname(fpath)) + #bb.note("Split %s -> %s" % (file, fpath)) + # Only store off the hard link reference if we successfully split! + splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d) + + # Hardlink our debug symbols to the other hardlink copies + for ref in inodes: + if len(inodes[ref]) == 1: + continue + for file in inodes[ref][1:]: + src = file[len(dvar):] + dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend + fpath = dvar + dest + target = inodes[ref][0][len(dvar):] + ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend + bb.utils.mkdirhier(os.path.dirname(fpath)) + #bb.note("Link %s -> %s" % (fpath, ftarget)) + os.link(ftarget, fpath) + + # Create symlinks for all cases we were able to split symbols + for file in symlinks: + src = file[len(dvar):] + dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend + fpath = dvar + dest + # Skip it if the target doesn't exist + try: + s = os.stat(fpath) + except OSError as e: + (err, strerror) = e.args + if err != errno.ENOENT: + raise + continue + + ltarget = symlinks[file] + lpath = os.path.dirname(ltarget) + lbase = os.path.basename(ltarget) + ftarget = "" + if lpath and lpath != ".": + ftarget += lpath + debugdir + "/" + ftarget += lbase + debugappend + if lpath.startswith(".."): + ftarget = os.path.join("..", ftarget) + bb.utils.mkdirhier(os.path.dirname(fpath)) + #bb.note("Symlink %s -> %s" % (fpath, ftarget)) + os.symlink(ftarget, fpath) + + # Process the debugsrcdir if requested... + # This copies and places the referenced sources for later debugging... 
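# To make the dest arithmetic above concrete (paths purely illustrative):
# for src = "/usr/bin/foo" the default ".debug" style yields
#   "" + "/usr/bin" + "/.debug" + "/" + "foo" + ""   -> /usr/bin/.debug/foo
# while PACKAGE_DEBUG_SPLIT_STYLE = "debug-file-directory" yields
#   "/usr/lib/debug" + "/usr/bin" + "" + "/" + "foo" + ".debug"
#                                                    -> /usr/lib/debug/usr/bin/foo.debug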
+ copydebugsources(debugsrcdir, d) + # + # End of debug splitting + # + + # + # Now lets go back over things and strip them + # + if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'): + strip = d.getVar("STRIP", True) + sfiles = [] + for file in elffiles: + elf_file = int(elffiles[file]) + #bb.note("Strip %s" % file) + sfiles.append((file, elf_file, strip)) + for f in kernmods: + sfiles.append((f, 16, strip)) + + oe.utils.multiprocess_exec(sfiles, oe.package.runstrip) + + # + # End of strip + # +} + +python populate_packages () { + import glob, re + + workdir = d.getVar('WORKDIR', True) + outdir = d.getVar('DEPLOY_DIR', True) + dvar = d.getVar('PKGD', True) + packages = d.getVar('PACKAGES', True) + pn = d.getVar('PN', True) + + bb.utils.mkdirhier(outdir) + os.chdir(dvar) + + # Sanity check PACKAGES for duplicates and for LICENSE_EXCLUSION + # Sanity should be moved to sanity.bbclass once we have the infrastucture + package_list = [] + + for pkg in packages.split(): + if d.getVar('LICENSE_EXCLUSION-' + pkg, True): + msg = "%s has an incompatible license. Excluding from packaging." % pkg + package_qa_handle_error("incompatible-license", msg, d) + if pkg in package_list: + msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg + package_qa_handle_error("packages-list", msg, d) + else: + package_list.append(pkg) + d.setVar('PACKAGES', ' '.join(package_list)) + pkgdest = d.getVar('PKGDEST', True) + + seen = [] + + # os.mkdir masks the permissions with umask so we have to unset it first + oldumask = os.umask(0) + + for pkg in package_list: + root = os.path.join(pkgdest, pkg) + bb.utils.mkdirhier(root) + + filesvar = d.getVar('FILES_%s' % pkg, True) or "" + if "//" in filesvar: + msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg + package_qa_handle_error("files-invalid", msg, d) + filesvar.replace("//", "/") + + origfiles = filesvar.split() + files = [] + for file in origfiles: + if os.path.isabs(file): + file = '.' + file + if not file.startswith("./"): + file = './' + file + globbed = glob.glob(file) + if globbed: + if [ file ] != globbed: + files += globbed + continue + files.append(file) + + for file in files: + if not cpath.islink(file): + if cpath.isdir(file): + newfiles = [ os.path.join(file,x) for x in os.listdir(file) ] + if newfiles: + files += newfiles + continue + if (not cpath.islink(file)) and (not cpath.exists(file)): + continue + if file in seen: + continue + seen.append(file) + + if d.getVar('LICENSE_EXCLUSION-' + pkg, True): + continue + + def mkdir(src, dest, p): + src = os.path.join(src, p) + dest = os.path.join(dest, p) + fstat = cpath.stat(src) + os.mkdir(dest, fstat.st_mode) + os.chown(dest, fstat.st_uid, fstat.st_gid) + if p not in seen: + seen.append(p) + cpath.updatecache(dest) + + def mkdir_recurse(src, dest, paths): + if cpath.exists(dest + '/' + paths): + return + while paths.startswith("./"): + paths = paths[2:] + p = "." 
+ for c in paths.split("/"): + p = os.path.join(p, c) + if not cpath.exists(os.path.join(dest, p)): + mkdir(src, dest, p) + + if cpath.isdir(file) and not cpath.islink(file): + mkdir_recurse(dvar, root, file) + continue + + mkdir_recurse(dvar, root, os.path.dirname(file)) + fpath = os.path.join(root,file) + if not cpath.islink(file): + os.link(file, fpath) + fstat = cpath.stat(file) + os.chmod(fpath, fstat.st_mode) + os.chown(fpath, fstat.st_uid, fstat.st_gid) + continue + ret = bb.utils.copyfile(file, fpath) + if ret is False or ret == 0: + raise bb.build.FuncFailed("File population failed") + + os.umask(oldumask) + os.chdir(workdir) + + unshipped = [] + for root, dirs, files in cpath.walk(dvar): + dir = root[len(dvar):] + if not dir: + dir = os.sep + for f in (files + dirs): + path = os.path.join(dir, f) + if ('.' + path) not in seen: + unshipped.append(path) + + if unshipped != []: + msg = pn + ": Files/directories were installed but not shipped" + if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split(): + bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn) + else: + for f in unshipped: + msg = msg + "\n " + f + package_qa_handle_error("installed-vs-shipped", msg, d) +} +populate_packages[dirs] = "${D}" + +python package_fixsymlinks () { + import errno + pkgdest = d.getVar('PKGDEST', True) + packages = d.getVar("PACKAGES").split() + + dangling_links = {} + pkg_files = {} + for pkg in packages: + dangling_links[pkg] = [] + pkg_files[pkg] = [] + inst_root = os.path.join(pkgdest, pkg) + for path in pkgfiles[pkg]: + rpath = path[len(inst_root):] + pkg_files[pkg].append(rpath) + rtarget = cpath.realpath(path, inst_root, True, assume_dir = True) + if not cpath.lexists(rtarget): + dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):])) + + newrdepends = {} + for pkg in dangling_links: + for l in dangling_links[pkg]: + found = False + bb.debug(1, "%s contains dangling link %s" % (pkg, l)) + for p in packages: + if l in pkg_files[p]: + found = True + bb.debug(1, "target found in %s" % p) + if p == pkg: + break + if pkg not in newrdepends: + newrdepends[pkg] = [] + newrdepends[pkg].append(p) + break + if found == False: + bb.note("%s contains dangling symlink to %s" % (pkg, l)) + + for pkg in newrdepends: + rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "") + for p in newrdepends[pkg]: + if p not in rdepends: + rdepends[p] = [] + d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False)) +} + + +python package_package_name_hook() { + """ + A package_name_hook function can be used to rewrite the package names by + changing PKG. For an example, see debian.bbclass. 
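    A hook body of that shape (an illustrative sketch only, not the
    debian.bbclass implementation) could rename packages through the
    per-package PKG variable that the rest of this class honours:

        for pkg in (d.getVar('PACKAGES', True) or "").split():
            d.setVar('PKG_' + pkg, 'renamed-' + pkg)  # 'renamed-' is a made-up prefix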
+ """ + pass +} + +EXPORT_FUNCTIONS package_name_hook + + +PKGDESTWORK = "${WORKDIR}/pkgdata" + +python emit_pkgdata() { + from glob import glob + import json + + def write_if_exists(f, pkg, var): + def encode(str): + import codecs + c = codecs.getencoder("string_escape") + return c(str)[0] + + val = d.getVar('%s_%s' % (var, pkg), True) + if val: + f.write('%s_%s: %s\n' % (var, pkg, encode(val))) + return val + val = d.getVar('%s' % (var), True) + if val: + f.write('%s: %s\n' % (var, encode(val))) + return val + + def write_extra_pkgs(variants, pn, packages, pkgdatadir): + for variant in variants: + with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd: + fd.write("PACKAGES: %s\n" % ' '.join( + map(lambda pkg: '%s-%s' % (variant, pkg), packages.split()))) + + def write_extra_runtime_pkgs(variants, packages, pkgdatadir): + for variant in variants: + for pkg in packages.split(): + ml_pkg = "%s-%s" % (variant, pkg) + subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg) + with open(subdata_file, 'w') as fd: + fd.write("PKG_%s: %s" % (ml_pkg, pkg)) + + packages = d.getVar('PACKAGES', True) + pkgdest = d.getVar('PKGDEST', True) + pkgdatadir = d.getVar('PKGDESTWORK', True) + + # Take shared lock since we're only reading, not writing + lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True) + + data_file = pkgdatadir + d.expand("/${PN}" ) + f = open(data_file, 'w') + f.write("PACKAGES: %s\n" % packages) + f.close() + + pn = d.getVar('PN', True) + global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split() + variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split() + + if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d): + write_extra_pkgs(variants, pn, packages, pkgdatadir) + + if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)): + write_extra_pkgs(global_variants, pn, packages, pkgdatadir) + + workdir = d.getVar('WORKDIR', True) + + for pkg in packages.split(): + pkgval = d.getVar('PKG_%s' % pkg, True) + if pkgval is None: + pkgval = pkg + d.setVar('PKG_%s' % pkg, pkg) + + pkgdestpkg = os.path.join(pkgdest, pkg) + files = {} + total_size = 0 + for f in pkgfiles[pkg]: + relpth = os.path.relpath(f, pkgdestpkg) + fstat = os.lstat(f) + total_size += fstat.st_size + files[os.sep + relpth] = fstat.st_size + d.setVar('FILES_INFO', json.dumps(files)) + + subdata_file = pkgdatadir + "/runtime/%s" % pkg + sf = open(subdata_file, 'w') + write_if_exists(sf, pkg, 'PN') + write_if_exists(sf, pkg, 'PE') + write_if_exists(sf, pkg, 'PV') + write_if_exists(sf, pkg, 'PR') + write_if_exists(sf, pkg, 'PKGE') + write_if_exists(sf, pkg, 'PKGV') + write_if_exists(sf, pkg, 'PKGR') + write_if_exists(sf, pkg, 'LICENSE') + write_if_exists(sf, pkg, 'DESCRIPTION') + write_if_exists(sf, pkg, 'SUMMARY') + write_if_exists(sf, pkg, 'RDEPENDS') + rprov = write_if_exists(sf, pkg, 'RPROVIDES') + write_if_exists(sf, pkg, 'RRECOMMENDS') + write_if_exists(sf, pkg, 'RSUGGESTS') + write_if_exists(sf, pkg, 'RREPLACES') + write_if_exists(sf, pkg, 'RCONFLICTS') + write_if_exists(sf, pkg, 'SECTION') + write_if_exists(sf, pkg, 'PKG') + write_if_exists(sf, pkg, 'ALLOW_EMPTY') + write_if_exists(sf, pkg, 'FILES') + write_if_exists(sf, pkg, 'pkg_postinst') + write_if_exists(sf, pkg, 'pkg_postrm') + write_if_exists(sf, pkg, 'pkg_preinst') + write_if_exists(sf, pkg, 'pkg_prerm') + write_if_exists(sf, pkg, 'FILERPROVIDESFLIST') + write_if_exists(sf, pkg, 'FILES_INFO') + for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split(): + 
write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile) + + write_if_exists(sf, pkg, 'FILERDEPENDSFLIST') + for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split(): + write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile) + + sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size)) + sf.close() + + # Symlinks needed for rprovides lookup + if rprov: + for p in rprov.strip().split(): + subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg) + bb.utils.mkdirhier(os.path.dirname(subdata_sym)) + oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True) + + allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True) + if not allow_empty: + allow_empty = d.getVar('ALLOW_EMPTY', True) + root = "%s/%s" % (pkgdest, pkg) + os.chdir(root) + g = glob('*') + if g or allow_empty == "1": + # Symlinks needed for reverse lookups (from the final package name) + subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval + oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True) + + packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg + open(packagedfile, 'w').close() + + if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d): + write_extra_runtime_pkgs(variants, packages, pkgdatadir) + + if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d): + write_extra_runtime_pkgs(global_variants, packages, pkgdatadir) + + bb.utils.unlockfile(lf) +} +emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides" + +ldconfig_postinst_fragment() { +if [ x"$D" = "x" ]; then + if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi +fi +} + +RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt" + +# Collect perfile run-time dependency metadata +# Output: +# FILERPROVIDESFLIST_pkg - list of all files w/ deps +# FILERPROVIDES_filepath_pkg - per file dep +# +# FILERDEPENDSFLIST_pkg - list of all files w/ deps +# FILERDEPENDS_filepath_pkg - per file dep + +python package_do_filedeps() { + if d.getVar('SKIP_FILEDEPS', True) == '1': + return + + pkgdest = d.getVar('PKGDEST', True) + packages = d.getVar('PACKAGES', True) + rpmdeps = d.getVar('RPMDEPS', True) + + def chunks(files, n): + return [files[i:i+n] for i in range(0, len(files), n)] + + pkglist = [] + for pkg in packages.split(): + if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1': + continue + if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'): + continue + for files in chunks(pkgfiles[pkg], 100): + pkglist.append((pkg, files, rpmdeps, pkgdest)) + + processed = oe.utils.multiprocess_exec( pkglist, oe.package.filedeprunner) + + provides_files = {} + requires_files = {} + + for result in processed: + (pkg, provides, requires) = result + + if pkg not in provides_files: + provides_files[pkg] = [] + if pkg not in requires_files: + requires_files[pkg] = [] + + for file in provides: + provides_files[pkg].append(file) + key = "FILERPROVIDES_" + file + "_" + pkg + d.setVar(key, " ".join(provides[file])) + + for file in requires: + requires_files[pkg].append(file) + key = "FILERDEPENDS_" + file + "_" + pkg + d.setVar(key, " ".join(requires[file])) + + for pkg in requires_files: + d.setVar("FILERDEPENDSFLIST_" + pkg, " 
".join(requires_files[pkg])) + for pkg in provides_files: + d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg])) +} + +SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2" +SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2" + +python package_do_shlibs() { + import re, pipes + import subprocess as sub + + exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0) + if exclude_shlibs: + bb.note("not generating shlibs") + return + + lib_re = re.compile("^.*\.so") + libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True)) + + packages = d.getVar('PACKAGES', True) + targetos = d.getVar('TARGET_OS', True) + + workdir = d.getVar('WORKDIR', True) + + ver = d.getVar('PKGV', True) + if not ver: + msg = "PKGV not defined" + package_qa_handle_error("pkgv-undefined", msg, d) + return + + pkgdest = d.getVar('PKGDEST', True) + + shlibs_dirs = d.getVar('SHLIBSDIRS', True).split() + shlibswork_dir = d.getVar('SHLIBSWORKDIR', True) + + # Take shared lock since we're only reading, not writing + lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}")) + + def read_shlib_providers(): + list_re = re.compile('^(.*)\.list$') + # Go from least to most specific since the last one found wins + for dir in reversed(shlibs_dirs): + bb.debug(2, "Reading shlib providers in %s" % (dir)) + if not os.path.exists(dir): + continue + for file in os.listdir(dir): + m = list_re.match(file) + if m: + dep_pkg = m.group(1) + fd = open(os.path.join(dir, file)) + lines = fd.readlines() + fd.close() + for l in lines: + s = l.strip().split(":") + if s[0] not in shlib_provider: + shlib_provider[s[0]] = {} + shlib_provider[s[0]][s[1]] = (dep_pkg, s[2]) + + def linux_so(file, needed, sonames, renames, pkgver): + needs_ldconfig = False + ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '') + cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null" + fd = os.popen(cmd) + lines = fd.readlines() + fd.close() + rpath = [] + for l in lines: + m = re.match("\s+RPATH\s+([^\s]*)", l) + if m: + rpaths = m.group(1).replace("$ORIGIN", ldir).split(":") + rpath = map(os.path.normpath, rpaths) + for l in lines: + m = re.match("\s+NEEDED\s+([^\s]*)", l) + if m: + dep = m.group(1) + if dep not in needed[pkg]: + needed[pkg].append((dep, file, rpath)) + m = re.match("\s+SONAME\s+([^\s]*)", l) + if m: + this_soname = m.group(1) + prov = (this_soname, ldir, pkgver) + if not prov in sonames: + # if library is private (only used by package) then do not build shlib for it + if not private_libs or this_soname not in private_libs: + sonames.append(prov) + if libdir_re.match(os.path.dirname(file)): + needs_ldconfig = True + if snap_symlinks and (os.path.basename(file) != this_soname): + renames.append((file, os.path.join(os.path.dirname(file), this_soname))) + return needs_ldconfig + + def darwin_so(file, needed, sonames, renames, pkgver): + if not os.path.exists(file): + return + ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '') + + def get_combinations(base): + # + # Given a base library name, find all combinations of this split by "." 
and "-" + # + combos = [] + options = base.split(".") + for i in range(1, len(options) + 1): + combos.append(".".join(options[0:i])) + options = base.split("-") + for i in range(1, len(options) + 1): + combos.append("-".join(options[0:i])) + return combos + + if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'): + # Drop suffix + name = os.path.basename(file).rsplit(".",1)[0] + # Find all combinations + combos = get_combinations(name) + for combo in combos: + if not combo in sonames: + prov = (combo, ldir, pkgver) + sonames.append(prov) + if file.endswith('.dylib') or file.endswith('.so'): + rpath = [] + p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE) + err, out = p.communicate() + # If returned succesfully, process stderr for results + if p.returncode == 0: + for l in err.split("\n"): + l = l.strip() + if l.startswith('path '): + rpath.append(l.split()[1]) + + p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE) + err, out = p.communicate() + # If returned succesfully, process stderr for results + if p.returncode == 0: + for l in err.split("\n"): + l = l.strip() + if not l or l.endswith(":"): + continue + if "is not an object file" in l: + continue + name = os.path.basename(l.split()[0]).rsplit(".", 1)[0] + if name and name not in needed[pkg]: + needed[pkg].append((name, file, [])) + + if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1": + snap_symlinks = True + else: + snap_symlinks = False + + if (d.getVar('USE_LDCONFIG', True) or "1") == "1": + use_ldconfig = True + else: + use_ldconfig = False + + needed = {} + shlib_provider = {} + read_shlib_providers() + + for pkg in packages.split(): + private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or "" + private_libs = private_libs.split() + needs_ldconfig = False + bb.debug(2, "calculating shlib provides for %s" % pkg) + + pkgver = d.getVar('PKGV_' + pkg, True) + if not pkgver: + pkgver = d.getVar('PV_' + pkg, True) + if not pkgver: + pkgver = ver + + needed[pkg] = [] + sonames = list() + renames = list() + for file in pkgfiles[pkg]: + soname = None + if cpath.islink(file): + continue + if targetos == "darwin" or targetos == "darwin8": + darwin_so(file, needed, sonames, renames, pkgver) + elif os.access(file, os.X_OK) or lib_re.match(file): + ldconfig = linux_so(file, needed, sonames, renames, pkgver) + needs_ldconfig = needs_ldconfig or ldconfig + for (old, new) in renames: + bb.note("Renaming %s to %s" % (old, new)) + os.rename(old, new) + pkgfiles[pkg].remove(old) + + shlibs_file = os.path.join(shlibswork_dir, pkg + ".list") + if len(sonames): + fd = open(shlibs_file, 'w') + for s in sonames: + if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]: + (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]] + if old_pkg != pkg: + bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver)) + bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0])) + fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n') + if s[0] not in shlib_provider: + shlib_provider[s[0]] = {} + shlib_provider[s[0]][s[1]] = (pkg, pkgver) + fd.close() + if needs_ldconfig and use_ldconfig: + bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg) + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += 
d.getVar('ldconfig_postinst_fragment', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames)) + + bb.utils.unlockfile(lf) + + assumed_libs = d.getVar('ASSUME_SHLIBS', True) + if assumed_libs: + libdir = d.getVar("libdir", True) + for e in assumed_libs.split(): + l, dep_pkg = e.split(":") + lib_ver = None + dep_pkg = dep_pkg.rsplit("_", 1) + if len(dep_pkg) == 2: + lib_ver = dep_pkg[1] + dep_pkg = dep_pkg[0] + if l not in shlib_provider: + shlib_provider[l] = {} + shlib_provider[l][libdir] = (dep_pkg, lib_ver) + + libsearchpath = [d.getVar('libdir', True), d.getVar('base_libdir', True)] + + for pkg in packages.split(): + bb.debug(2, "calculating shlib requirements for %s" % pkg) + + deps = list() + for n in needed[pkg]: + # if n is in private libraries, don't try to search provider for it + # this could cause problem in case some abc.bb provides private + # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1 + # but skipping it is still better alternative than providing own + # version and then adding runtime dependency for the same system library + if private_libs and n[0] in private_libs: + bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0])) + continue + if n[0] in shlib_provider.keys(): + shlib_provider_path = list() + for k in shlib_provider[n[0]].keys(): + shlib_provider_path.append(k) + match = None + for p in n[2] + shlib_provider_path + libsearchpath: + if p in shlib_provider[n[0]]: + match = p + break + if match: + (dep_pkg, ver_needed) = shlib_provider[n[0]][match] + + bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1])) + + if dep_pkg == pkg: + continue + + if ver_needed: + dep = "%s (>= %s)" % (dep_pkg, ver_needed) + else: + dep = dep_pkg + if not dep in deps: + deps.append(dep) + continue + bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1])) + + deps_file = os.path.join(pkgdest, pkg + ".shlibdeps") + if os.path.exists(deps_file): + os.remove(deps_file) + if len(deps): + fd = open(deps_file, 'w') + for dep in deps: + fd.write(dep + '\n') + fd.close() +} + +python package_do_pkgconfig () { + import re + + packages = d.getVar('PACKAGES', True) + workdir = d.getVar('WORKDIR', True) + pkgdest = d.getVar('PKGDEST', True) + + shlibs_dirs = d.getVar('SHLIBSDIRS', True).split() + shlibswork_dir = d.getVar('SHLIBSWORKDIR', True) + + pc_re = re.compile('(.*)\.pc$') + var_re = re.compile('(.*)=(.*)') + field_re = re.compile('(.*): (.*)') + + pkgconfig_provided = {} + pkgconfig_needed = {} + for pkg in packages.split(): + pkgconfig_provided[pkg] = [] + pkgconfig_needed[pkg] = [] + for file in pkgfiles[pkg]: + m = pc_re.match(file) + if m: + pd = bb.data.init() + name = m.group(1) + pkgconfig_provided[pkg].append(name) + if not os.access(file, os.R_OK): + continue + f = open(file, 'r') + lines = f.readlines() + f.close() + for l in lines: + m = var_re.match(l) + if m: + name = m.group(1) + val = m.group(2) + pd.setVar(name, pd.expand(val)) + continue + m = field_re.match(l) + if m: + hdr = m.group(1) + exp = bb.data.expand(m.group(2), pd) + if hdr == 'Requires': + pkgconfig_needed[pkg] += exp.replace(',', ' ').split() + + # Take shared lock since we're only reading, not writing + lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}")) + + for pkg in packages.split(): + pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist") + if pkgconfig_provided[pkg] != []: + f = open(pkgs_file, 'w') + for p 
in pkgconfig_provided[pkg]: + f.write('%s\n' % p) + f.close() + + # Go from least to most specific since the last one found wins + for dir in reversed(shlibs_dirs): + if not os.path.exists(dir): + continue + for file in os.listdir(dir): + m = re.match('^(.*)\.pclist$', file) + if m: + pkg = m.group(1) + fd = open(os.path.join(dir, file)) + lines = fd.readlines() + fd.close() + pkgconfig_provided[pkg] = [] + for l in lines: + pkgconfig_provided[pkg].append(l.rstrip()) + + for pkg in packages.split(): + deps = [] + for n in pkgconfig_needed[pkg]: + found = False + for k in pkgconfig_provided.keys(): + if n in pkgconfig_provided[k]: + if k != pkg and not (k in deps): + deps.append(k) + found = True + if found == False: + bb.note("couldn't find pkgconfig module '%s' in any package" % n) + deps_file = os.path.join(pkgdest, pkg + ".pcdeps") + if len(deps): + fd = open(deps_file, 'w') + for dep in deps: + fd.write(dep + '\n') + fd.close() + + bb.utils.unlockfile(lf) +} + +def read_libdep_files(d): + pkglibdeps = {} + packages = d.getVar('PACKAGES', True).split() + for pkg in packages: + pkglibdeps[pkg] = {} + for extension in ".shlibdeps", ".pcdeps", ".clilibdeps": + depsfile = d.expand("${PKGDEST}/" + pkg + extension) + if os.access(depsfile, os.R_OK): + fd = open(depsfile) + lines = fd.readlines() + fd.close() + for l in lines: + l.rstrip() + deps = bb.utils.explode_dep_versions2(l) + for dep in deps: + if not dep in pkglibdeps[pkg]: + pkglibdeps[pkg][dep] = deps[dep] + return pkglibdeps + +python read_shlibdeps () { + pkglibdeps = read_libdep_files(d) + + packages = d.getVar('PACKAGES', True).split() + for pkg in packages: + rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "") + for dep in pkglibdeps[pkg]: + # Add the dep if it's not already there, or if no comparison is set + if dep not in rdepends: + rdepends[dep] = [] + for v in pkglibdeps[pkg][dep]: + if v not in rdepends[dep]: + rdepends[dep].append(v) + d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False)) +} + +python package_depchains() { + """ + For a given set of prefix and postfix modifiers, make those packages + RRECOMMENDS on the corresponding packages for its RDEPENDS. + + Example: If package A depends upon package B, and A's .bb emits an + A-dev package, this would make A-dev Recommends: B-dev. + + If only one of a given suffix is specified, it will take the RRECOMMENDS + based on the RDEPENDS of *all* other packages. If more than one of a given + suffix is specified, its will only use the RDEPENDS of the single parent + package. 
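    Concretely (an illustrative case, using "-doc" as the suffix): if A-doc
    is the only such package emitted, its RRECOMMENDS are derived from the
    RDEPENDS of every package in the recipe; if B-doc is emitted as well,
    A-doc draws only on RDEPENDS_A and B-doc only on RDEPENDS_B. The -dev
    and -dbg suffixes are special-cased below this rule; -dev
    recommendations come from the recipe's DEPENDS.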
+ """ + + packages = d.getVar('PACKAGES', True) + postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split() + prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split() + + def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d): + + #bb.note('depends for %s is %s' % (base, depends)) + rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "") + + for depend in depends: + if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'): + #bb.note("Skipping %s" % depend) + continue + if depend.endswith('-dev'): + depend = depend[:-4] + if depend.endswith('-dbg'): + depend = depend[:-4] + pkgname = getname(depend, suffix) + #bb.note("Adding %s for %s" % (pkgname, depend)) + if pkgname not in rreclist and pkgname != pkg: + rreclist[pkgname] = [] + + #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist))) + d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False)) + + def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d): + + #bb.note('rdepends for %s is %s' % (base, rdepends)) + rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "") + + for depend in rdepends: + if depend.find('virtual-locale-') != -1: + #bb.note("Skipping %s" % depend) + continue + if depend.endswith('-dev'): + depend = depend[:-4] + if depend.endswith('-dbg'): + depend = depend[:-4] + pkgname = getname(depend, suffix) + #bb.note("Adding %s for %s" % (pkgname, depend)) + if pkgname not in rreclist and pkgname != pkg: + rreclist[pkgname] = [] + + #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist))) + d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False)) + + def add_dep(list, dep): + if dep not in list: + list.append(dep) + + depends = [] + for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""): + add_dep(depends, dep) + + rdepends = [] + for pkg in packages.split(): + for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""): + add_dep(rdepends, dep) + + #bb.note('rdepends is %s' % rdepends) + + def post_getname(name, suffix): + return '%s%s' % (name, suffix) + def pre_getname(name, suffix): + return '%s%s' % (suffix, name) + + pkgs = {} + for pkg in packages.split(): + for postfix in postfixes: + if pkg.endswith(postfix): + if not postfix in pkgs: + pkgs[postfix] = {} + pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname) + + for prefix in prefixes: + if pkg.startswith(prefix): + if not prefix in pkgs: + pkgs[prefix] = {} + pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname) + + if "-dbg" in pkgs: + pkglibdeps = read_libdep_files(d) + pkglibdeplist = [] + for pkg in pkglibdeps: + for k in pkglibdeps[pkg]: + add_dep(pkglibdeplist, k) + # FIXME this should not look at PN once all task recipes inherit from task.bbclass + dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (d.getVar('PN', True) or '').startswith('packagegroup-')) + + for suffix in pkgs: + for pkg in pkgs[suffix]: + if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'): + continue + (base, func) = pkgs[suffix][pkg] + if suffix == "-dev": + pkg_adddeprrecs(pkg, base, suffix, func, depends, d) + elif suffix == "-dbg": + if not dbgdefaultdeps: + pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d) + continue + if len(pkgs[suffix]) == 1: + pkg_addrrecs(pkg, base, suffix, func, rdepends, d) + else: + rdeps = [] + for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""): + add_dep(rdeps, dep) + pkg_addrrecs(pkg, base, suffix, func, 
rdeps, d) +} + +# Since bitbake can't determine which variables are accessed during package +# iteration, we need to list them here: +PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES" + +def gen_packagevar(d): + ret = [] + pkgs = (d.getVar("PACKAGES", True) or "").split() + vars = (d.getVar("PACKAGEVARS", True) or "").split() + for p in pkgs: + for v in vars: + ret.append(v + "_" + p) + + # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for + # affected recipes. + ret.append('LICENSE_EXCLUSION-%s' % p) + return " ".join(ret) + +PACKAGE_PREPROCESS_FUNCS ?= "" +# Functions for setting up PKGD +PACKAGEBUILDPKGD ?= " \ + perform_packagecopy \ + ${PACKAGE_PREPROCESS_FUNCS} \ + split_and_strip_files \ + fixup_perms \ + " +# Functions which split PKGD up into separate packages +PACKAGESPLITFUNCS ?= " \ + package_do_split_locales \ + populate_packages" +# Functions which process metadata based on split packages +PACKAGEFUNCS += " \ + package_fixsymlinks \ + package_name_hook \ + package_do_filedeps \ + package_do_shlibs \ + package_do_pkgconfig \ + read_shlibdeps \ + package_depchains \ + emit_pkgdata" + +python do_package () { + # Change the following version to cause sstate to invalidate the package + # cache. This is useful if an item this class depends on changes in a + # way that the output of this class changes. rpmdeps is a good example + # as any change to rpmdeps requires this to be rerun. + # PACKAGE_BBCLASS_VERSION = "1" + + # Init cachedpath + global cpath + cpath = oe.cachedpath.CachedPath() + + ########################################################################### + # Sanity test the setup + ########################################################################### + + packages = (d.getVar('PACKAGES', True) or "").split() + if len(packages) < 1: + bb.debug(1, "No packages to build, skipping do_package") + return + + workdir = d.getVar('WORKDIR', True) + outdir = d.getVar('DEPLOY_DIR', True) + dest = d.getVar('D', True) + dvar = d.getVar('PKGD', True) + pn = d.getVar('PN', True) + + if not workdir or not outdir or not dest or not dvar or not pn: + msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package" + package_qa_handle_error("var-undefined", msg, d) + return + + bb.build.exec_func("package_get_auto_pr", d) + + ########################################################################### + # Optimisations + ########################################################################### + + # Continually expanding complex expressions is inefficient, particularly + # when we write to the datastore and invalidate the expansion cache. 
This + # code pre-expands some frequently used variables + + def expandVar(x, d): + d.setVar(x, d.getVar(x, True)) + + for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO': + expandVar(x, d) + + ########################################################################### + # Setup PKGD (from D) + ########################################################################### + + for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split(): + bb.build.exec_func(f, d) + + ########################################################################### + # Split up PKGD into PKGDEST + ########################################################################### + + cpath = oe.cachedpath.CachedPath() + + for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split(): + bb.build.exec_func(f, d) + + ########################################################################### + # Process PKGDEST + ########################################################################### + + # Build global list of files in each split package + global pkgfiles + pkgfiles = {} + packages = d.getVar('PACKAGES', True).split() + pkgdest = d.getVar('PKGDEST', True) + for pkg in packages: + pkgfiles[pkg] = [] + for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg): + for file in files: + pkgfiles[pkg].append(walkroot + os.sep + file) + + for f in (d.getVar('PACKAGEFUNCS', True) or '').split(): + bb.build.exec_func(f, d) +} + +do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}" +do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}" +addtask package after do_install + +PACKAGELOCK = "${STAGING_DIR}/package-output.lock" +SSTATETASKS += "do_package" +do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}" +do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}" +do_package[sstate-lockfile-shared] = "${PACKAGELOCK}" +do_package_setscene[dirs] = "${STAGING_DIR}" + +python do_package_setscene () { + sstate_setscene(d) +} +addtask do_package_setscene + +do_packagedata () { + : +} + +addtask packagedata before do_build after do_package + +SSTATETASKS += "do_packagedata" +do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}" +do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}" +do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}" +do_packagedata[stamp-extra-info] = "${MACHINE}" + +python do_packagedata_setscene () { + sstate_setscene(d) +} +addtask do_packagedata_setscene + +# +# Helper functions for the package writing classes +# + +def mapping_rename_hook(d): + """ + Rewrite variables to account for package renaming in things + like debian.bbclass or manual PKG variable name changes + """ + pkg = d.getVar("PKG", True) + runtime_mapping_rename("RDEPENDS", pkg, d) + runtime_mapping_rename("RRECOMMENDS", pkg, d) + runtime_mapping_rename("RSUGGESTS", pkg, d) + diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass new file mode 100644 index 0000000000..5b5f7e2c9a --- /dev/null +++ b/meta/classes/package_deb.bbclass @@ -0,0 +1,330 @@ +# +# Copyright 2006-2008 OpenedHand Ltd. 
+# + +inherit package + +IMAGE_PKGTYPE ?= "deb" + +DPKG_ARCH ?= "${TARGET_ARCH}" + +PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs" + +APTCONF_TARGET = "${WORKDIR}" + +APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}" + +# +# install a bunch of packages using apt +# the following shell variables needs to be set before calling this func: +# INSTALL_ROOTFS_DEB - install root dir +# INSTALL_BASEARCH_DEB - install base architecutre +# INSTALL_ARCHS_DEB - list of available archs +# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed +# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attemped to be installed only +# INSTALL_PACKAGES_LINGUAS_DEB - additional packages for uclibc +# INSTALL_TASK_DEB - task name + +python do_package_deb () { + import re, copy + import textwrap + import subprocess + + workdir = d.getVar('WORKDIR', True) + if not workdir: + bb.error("WORKDIR not defined, unable to package") + return + + outdir = d.getVar('PKGWRITEDIRDEB', True) + if not outdir: + bb.error("PKGWRITEDIRDEB not defined, unable to package") + return + + packages = d.getVar('PACKAGES', True) + if not packages: + bb.debug(1, "PACKAGES not defined, nothing to package") + return + + tmpdir = d.getVar('TMPDIR', True) + + if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK): + os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN")) + + if packages == []: + bb.debug(1, "No packages; nothing to do") + return + + pkgdest = d.getVar('PKGDEST', True) + + def cleanupcontrol(root): + for p in ['CONTROL', 'DEBIAN']: + p = os.path.join(root, p) + if os.path.exists(p): + bb.utils.prunedir(p) + + for pkg in packages.split(): + localdata = bb.data.createCopy(d) + root = "%s/%s" % (pkgdest, pkg) + + lf = bb.utils.lockfile(root + ".lock") + + localdata.setVar('ROOT', '') + localdata.setVar('ROOT_%s' % pkg, root) + pkgname = localdata.getVar('PKG_%s' % pkg, True) + if not pkgname: + pkgname = pkg + localdata.setVar('PKG', pkgname) + + localdata.setVar('OVERRIDES', pkg) + + bb.data.update_data(localdata) + basedir = os.path.join(os.path.dirname(root)) + + pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True)) + bb.utils.mkdirhier(pkgoutdir) + + os.chdir(root) + cleanupcontrol(root) + from glob import glob + g = glob('*') + if not g and localdata.getVar('ALLOW_EMPTY') != "1": + bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True))) + bb.utils.unlockfile(lf) + continue + + controldir = os.path.join(root, 'DEBIAN') + bb.utils.mkdirhier(controldir) + os.chmod(controldir, 0755) + try: + ctrlfile = open(os.path.join(controldir, 'control'), 'w') + # import codecs + # ctrlfile = codecs.open("someFile", "w", "utf-8") + except OSError: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("unable to open control file for writing.") + + fields = [] + pe = d.getVar('PKGE', True) + if pe and int(pe) > 0: + fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']]) + else: + fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']]) + fields.append(["Description: %s\n", ['DESCRIPTION']]) + fields.append(["Section: %s\n", ['SECTION']]) + fields.append(["Priority: %s\n", ['PRIORITY']]) + fields.append(["Maintainer: %s\n", ['MAINTAINER']]) + fields.append(["Architecture: %s\n", ['DPKG_ARCH']]) + fields.append(["OE: %s\n", ['PN']]) + fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']]) + if d.getVar('HOMEPAGE', True): + fields.append(["Homepage: %s\n", ['HOMEPAGE']]) + + # Package, 
Version, Maintainer, Description - mandatory + # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional + + + def pullData(l, d): + l2 = [] + for i in l: + data = d.getVar(i, True) + if data is None: + raise KeyError(f) + if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all': + data = 'all' + elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH': + # The params in deb package control don't allow character + # `_', so change the arch's `_' to `-'. Such as `x86_64' + # -->`x86-64' + data = data.replace('_', '-') + l2.append(data) + return l2 + + ctrlfile.write("Package: %s\n" % pkgname) + # check for required fields + try: + for (c, fs) in fields: + for f in fs: + if localdata.getVar(f) is None: + raise KeyError(f) + # Special behavior for description... + if 'DESCRIPTION' in fs: + summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "." + ctrlfile.write('Description: %s\n' % unicode(summary)) + description = localdata.getVar('DESCRIPTION', True) or "." + description = textwrap.dedent(description).strip() + if '\\n' in description: + # Manually indent + for t in description.split('\\n'): + # We don't limit the width when manually indent, but we do + # need the textwrap.fill() to set the initial_indent and + # subsequent_indent, so set a large width + ctrlfile.write('%s\n' % unicode(textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' '))) + else: + # Auto indent + ctrlfile.write('%s\n' % unicode(textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))) + + else: + ctrlfile.write(unicode(c % tuple(pullData(fs, localdata)))) + except KeyError: + import sys + (type, value, traceback) = sys.exc_info() + bb.utils.unlockfile(lf) + ctrlfile.close() + raise bb.build.FuncFailed("Missing field for deb generation: %s" % value) + # more fields + + custom_fields_chunk = get_package_additional_metadata("deb", localdata) + if custom_fields_chunk is not None: + ctrlfile.write(unicode(custom_fields_chunk)) + ctrlfile.write("\n") + + mapping_rename_hook(localdata) + + def debian_cmp_remap(var): + # dpkg does not allow for '(' or ')' in a dependency name + # replace these instances with '__' and '__' + # + # In debian '>' and '<' do not mean what it appears they mean + # '<' = less or equal + # '>' = greater or equal + # adjust these to the '<<' and '>>' equivalents + # + for dep in var: + if '(' in dep: + newdep = dep.replace('(', '__') + newdep = newdep.replace(')', '__') + if newdep != dep: + var[newdep] = var[dep] + del var[dep] + for dep in var: + for i, v in enumerate(var[dep]): + if (v or "").startswith("< "): + var[dep][i] = var[dep][i].replace("< ", "<< ") + elif (v or "").startswith("> "): + var[dep][i] = var[dep][i].replace("> ", ">> ") + + rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "") + debian_cmp_remap(rdepends) + for dep in rdepends: + if '*' in dep: + del rdepends[dep] + rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "") + debian_cmp_remap(rrecommends) + for dep in rrecommends: + if '*' in dep: + del rrecommends[dep] + rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "") + debian_cmp_remap(rsuggests) + rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "") + debian_cmp_remap(rprovides) + rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "") + 
debian_cmp_remap(rreplaces) + rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "") + debian_cmp_remap(rconflicts) + if rdepends: + ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends))) + if rsuggests: + ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests))) + if rrecommends: + ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends))) + if rprovides: + ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides))) + if rreplaces: + ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces))) + if rconflicts: + ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts))) + ctrlfile.close() + + for script in ["preinst", "postinst", "prerm", "postrm"]: + scriptvar = localdata.getVar('pkg_%s' % script, True) + if not scriptvar: + continue + scriptvar = scriptvar.strip() + try: + scriptfile = open(os.path.join(controldir, script), 'w') + except OSError: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("unable to open %s script file for writing." % script) + + if scriptvar.startswith("#!"): + pos = scriptvar.find("\n") + 1 + scriptfile.write(scriptvar[:pos]) + else: + pos = 0 + scriptfile.write("#!/bin/sh\n") + + # Prevent the prerm/postrm scripts from being run during an upgrade + if script in ('prerm', 'postrm'): + scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n') + + scriptfile.write(scriptvar[pos:]) + scriptfile.write('\n') + scriptfile.close() + os.chmod(os.path.join(controldir, script), 0755) + + conffiles_str = localdata.getVar("CONFFILES", True) + if conffiles_str: + try: + conffiles = open(os.path.join(controldir, 'conffiles'), 'w') + except OSError: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("unable to open conffiles for writing.") + for f in conffiles_str.split(): + if os.path.exists(oe.path.join(root, f)): + conffiles.write('%s\n' % f) + conffiles.close() + + os.chdir(basedir) + ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True) + if ret != 0: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("dpkg-deb execution failed") + + cleanupcontrol(root) + bb.utils.unlockfile(lf) +} + +SSTATETASKS += "do_package_write_deb" +do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}" +do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}" + +python do_package_write_deb_setscene () { + tmpdir = d.getVar('TMPDIR', True) + + if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK): + os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN")) + + sstate_setscene(d) +} +addtask do_package_write_deb_setscene + +python () { + if d.getVar('PACKAGES', True) != '': + deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot' + d.appendVarFlag('do_package_write_deb', 'depends', deps) + d.setVarFlag('do_package_write_deb', 'fakeroot', "1") + + # Map TARGET_ARCH to Debian's ideas about architectures + darch = d.getVar('DPKG_ARCH', True) + if darch in ["x86", "i486", "i586", "i686", "pentium"]: + d.setVar('DPKG_ARCH', 'i386') + elif darch == "x86_64": + d.setVar('DPKG_ARCH', 'amd64') + elif darch == "arm": + d.setVar('DPKG_ARCH', 'armel') +} + +python do_package_write_deb () { + bb.build.exec_func("read_subpackage_metadata", d) + bb.build.exec_func("do_package_deb", d) +} +do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}" +do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}" +do_package_write_deb[umask] = "022" +addtask 
package_write_deb after do_packagedata do_package + + +PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot" +PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot" + +do_build[recrdeptask] += "do_package_write_deb" diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass new file mode 100644 index 0000000000..44fd3eb29c --- /dev/null +++ b/meta/classes/package_ipk.bbclass @@ -0,0 +1,286 @@ +inherit package + +IMAGE_PKGTYPE ?= "ipk" + +IPKGCONF_TARGET = "${WORKDIR}/opkg.conf" +IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf" + +PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks" + +# Program to be used to build opkg packages +OPKGBUILDCMD ??= "opkg-build" + +OPKG_ARGS = "--force_postinstall --prefer-arch-to-version" +OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}" +OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}" + +OPKGLIBDIR = "${localstatedir}/lib" + +python do_package_ipk () { + import re, copy + import textwrap + import subprocess + + workdir = d.getVar('WORKDIR', True) + outdir = d.getVar('PKGWRITEDIRIPK', True) + tmpdir = d.getVar('TMPDIR', True) + pkgdest = d.getVar('PKGDEST', True) + if not workdir or not outdir or not tmpdir: + bb.error("Variables incorrectly set, unable to package") + return + + packages = d.getVar('PACKAGES', True) + if not packages or packages == '': + bb.debug(1, "No packages; nothing to do") + return + + # We're about to add new packages so the index needs to be checked + # so remove the appropriate stamp file. + if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK): + os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN")) + + def cleanupcontrol(root): + for p in ['CONTROL', 'DEBIAN']: + p = os.path.join(root, p) + if os.path.exists(p): + bb.utils.prunedir(p) + + for pkg in packages.split(): + localdata = bb.data.createCopy(d) + root = "%s/%s" % (pkgdest, pkg) + + lf = bb.utils.lockfile(root + ".lock") + + localdata.setVar('ROOT', '') + localdata.setVar('ROOT_%s' % pkg, root) + pkgname = localdata.getVar('PKG_%s' % pkg, True) + if not pkgname: + pkgname = pkg + localdata.setVar('PKG', pkgname) + + localdata.setVar('OVERRIDES', pkg) + + bb.data.update_data(localdata) + basedir = os.path.join(os.path.dirname(root)) + arch = localdata.getVar('PACKAGE_ARCH', True) + + if localdata.getVar('IPK_HIERARCHICAL_FEED') == "1": + # Spread packages across subdirectories so each isn't too crowded + if pkgname.startswith('lib'): + pkg_prefix = 'lib' + pkgname[3] + else: + pkg_prefix = pkgname[0] + + # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages + # together. 
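# For example (package and arch names purely illustrative), the prefix rule
# above and the suffix grouping below place "libfoo-dev" built for arch
# "cortexa9" under
#   ${PKGWRITEDIRIPK}/cortexa9/libf/libfoo/
# and "bash-doc" under
#   ${PKGWRITEDIRIPK}/cortexa9/b/bash/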
These package suffixes are taken from the definitions of + # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf + if pkgname[-4:] in ('-dbg', '-dev', '-doc'): + pkg_subdir = pkgname[:-4] + elif pkgname.endswith('-staticdev'): + pkg_subdir = pkgname[:-10] + elif pkgname.endswith('-locale'): + pkg_subdir = pkgname[:-7] + elif '-locale-' in pkgname: + pkg_subdir = pkgname[:pkgname.find('-locale-')] + else: + pkg_subdir = pkgname + + pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir) + else: + pkgoutdir = "%s/%s" % (outdir, arch) + + bb.utils.mkdirhier(pkgoutdir) + os.chdir(root) + cleanupcontrol(root) + from glob import glob + g = glob('*') + if not g and localdata.getVar('ALLOW_EMPTY') != "1": + bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True))) + bb.utils.unlockfile(lf) + continue + + controldir = os.path.join(root, 'CONTROL') + bb.utils.mkdirhier(controldir) + try: + ctrlfile = open(os.path.join(controldir, 'control'), 'w') + except OSError: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("unable to open control file for writing.") + + fields = [] + pe = d.getVar('PKGE', True) + if pe and int(pe) > 0: + fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']]) + else: + fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']]) + fields.append(["Description: %s\n", ['DESCRIPTION']]) + fields.append(["Section: %s\n", ['SECTION']]) + fields.append(["Priority: %s\n", ['PRIORITY']]) + fields.append(["Maintainer: %s\n", ['MAINTAINER']]) + fields.append(["License: %s\n", ['LICENSE']]) + fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']]) + fields.append(["OE: %s\n", ['PN']]) + if d.getVar('HOMEPAGE', True): + fields.append(["Homepage: %s\n", ['HOMEPAGE']]) + + def pullData(l, d): + l2 = [] + for i in l: + l2.append(d.getVar(i, True)) + return l2 + + ctrlfile.write("Package: %s\n" % pkgname) + # check for required fields + try: + for (c, fs) in fields: + for f in fs: + if localdata.getVar(f) is None: + raise KeyError(f) + # Special behavior for description... + if 'DESCRIPTION' in fs: + summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "." + ctrlfile.write('Description: %s\n' % summary) + description = localdata.getVar('DESCRIPTION', True) or "." 
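# --- Illustrative aside (editorial, not part of the class): with hypothetical
# values (PKGE unset, PKGV=1.0, PKGR=r0, PACKAGE_ARCH=armv5te), the field list
# above plus the description handling below yield a CONTROL/control file
# roughly like:
#   Package: example
#   Version: 1.0-r0
#   Description: An example package
#    Longer DESCRIPTION text, wrapped and indented by textwrap.fill()
#   Section: base
#   Priority: optional
#   Maintainer: Example Maintainer <maintainer@example.com>
#   License: MIT
#   Architecture: armv5te
#   OE: example
# Depends/Recommends/Source lines are appended further down. ---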
+ description = textwrap.dedent(description).strip() + if '\\n' in description: + # Manually indent + for t in description.split('\\n'): + # We don't limit the width when manually indent, but we do + # need the textwrap.fill() to set the initial_indent and + # subsequent_indent, so set a large width + ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' ')) + else: + # Auto indent + ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' ')) + else: + ctrlfile.write(c % tuple(pullData(fs, localdata))) + except KeyError: + import sys + (type, value, traceback) = sys.exc_info() + ctrlfile.close() + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value) + # more fields + + custom_fields_chunk = get_package_additional_metadata("ipk", localdata) + if custom_fields_chunk is not None: + ctrlfile.write(custom_fields_chunk) + ctrlfile.write("\n") + + mapping_rename_hook(localdata) + + def debian_cmp_remap(var): + # In debian '>' and '<' do not mean what it appears they mean + # '<' = less or equal + # '>' = greater or equal + # adjust these to the '<<' and '>>' equivalents + # + for dep in var: + for i, v in enumerate(var[dep]): + if (v or "").startswith("< "): + var[dep][i] = var[dep][i].replace("< ", "<< ") + elif (v or "").startswith("> "): + var[dep][i] = var[dep][i].replace("> ", ">> ") + + rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "") + debian_cmp_remap(rdepends) + rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "") + debian_cmp_remap(rrecommends) + rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "") + debian_cmp_remap(rsuggests) + rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "") + debian_cmp_remap(rprovides) + rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "") + debian_cmp_remap(rreplaces) + rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "") + debian_cmp_remap(rconflicts) + + if rdepends: + ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends)) + if rsuggests: + ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests)) + if rrecommends: + ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends)) + if rprovides: + ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides)) + if rreplaces: + ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces)) + if rconflicts: + ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts)) + src_uri = localdata.getVar("SRC_URI", True).strip() or "None" + if src_uri: + src_uri = re.sub("\s+", " ", src_uri) + ctrlfile.write("Source: %s\n" % " ".join(src_uri.split())) + ctrlfile.close() + + for script in ["preinst", "postinst", "prerm", "postrm"]: + scriptvar = localdata.getVar('pkg_%s' % script, True) + if not scriptvar: + continue + try: + scriptfile = open(os.path.join(controldir, script), 'w') + except OSError: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("unable to open %s script file for writing." 
% script) + scriptfile.write(scriptvar) + scriptfile.close() + os.chmod(os.path.join(controldir, script), 0755) + + conffiles_str = localdata.getVar("CONFFILES", True) + if conffiles_str: + try: + conffiles = open(os.path.join(controldir, 'conffiles'), 'w') + except OSError: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("unable to open conffiles for writing.") + for f in conffiles_str.split(): + if os.path.exists(oe.path.join(root, f)): + conffiles.write('%s\n' % f) + conffiles.close() + + os.chdir(basedir) + ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True), + d.getVar("OPKGBUILDCMD",1), pkg, pkgoutdir), shell=True) + if ret != 0: + bb.utils.unlockfile(lf) + raise bb.build.FuncFailed("opkg-build execution failed") + + cleanupcontrol(root) + bb.utils.unlockfile(lf) + +} + +SSTATETASKS += "do_package_write_ipk" +do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}" +do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}" + +python do_package_write_ipk_setscene () { + tmpdir = d.getVar('TMPDIR', True) + + if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK): + os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN")) + + sstate_setscene(d) +} +addtask do_package_write_ipk_setscene + +python () { + if d.getVar('PACKAGES', True) != '': + deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot' + d.appendVarFlag('do_package_write_ipk', 'depends', deps) + d.setVarFlag('do_package_write_ipk', 'fakeroot', "1") +} + +python do_package_write_ipk () { + bb.build.exec_func("read_subpackage_metadata", d) + bb.build.exec_func("do_package_ipk", d) +} +do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}" +do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}" +do_package_write_ipk[umask] = "022" +addtask package_write_ipk after do_packagedata do_package + +PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot" +PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot" + +do_build[recrdeptask] += "do_package_write_ipk" diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass new file mode 100644 index 0000000000..92ddf7a30f --- /dev/null +++ b/meta/classes/package_rpm.bbclass @@ -0,0 +1,754 @@ +inherit package + +IMAGE_PKGTYPE ?= "rpm" + +RPM="rpm" +RPMBUILD="rpmbuild" + +PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms" + +# Maintaining the perfile dependencies has singificant overhead when writing the +# packages. When set, this value merges them for efficiency. 
+MERGEPERFILEDEPS = "1" + +# Construct per file dependencies file +def write_rpm_perfiledata(srcname, d): + workdir = d.getVar('WORKDIR', True) + packages = d.getVar('PACKAGES', True) + pkgd = d.getVar('PKGD', True) + + def dump_filerdeps(varname, outfile, d): + outfile.write("#!/usr/bin/env python\n\n") + outfile.write("# Dependency table\n") + outfile.write('deps = {\n') + for pkg in packages.split(): + dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg + dependsflist = (d.getVar(dependsflist_key, True) or "") + for dfile in dependsflist.split(): + key = "FILE" + varname + "_" + dfile + "_" + pkg + depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "") + file = dfile.replace("@underscore@", "_") + file = file.replace("@closebrace@", "]") + file = file.replace("@openbrace@", "[") + file = file.replace("@tab@", "\t") + file = file.replace("@space@", " ") + file = file.replace("@at@", "@") + outfile.write('"' + pkgd + file + '" : "') + for dep in depends_dict: + ver = depends_dict[dep] + if dep and ver: + ver = ver.replace("(","") + ver = ver.replace(")","") + outfile.write(dep + " " + ver + " ") + else: + outfile.write(dep + " ") + outfile.write('",\n') + outfile.write('}\n\n') + outfile.write("import sys\n") + outfile.write("while 1:\n") + outfile.write("\tline = sys.stdin.readline().strip()\n") + outfile.write("\tif not line:\n") + outfile.write("\t\tsys.exit(0)\n") + outfile.write("\tif line in deps:\n") + outfile.write("\t\tprint(deps[line] + '\\n')\n") + + # OE-core dependencies a.k.a. RPM requires + outdepends = workdir + "/" + srcname + ".requires" + + try: + dependsfile = open(outdepends, 'w') + except OSError: + raise bb.build.FuncFailed("unable to open spec file for writing.") + + dump_filerdeps('RDEPENDS', dependsfile, d) + + dependsfile.close() + os.chmod(outdepends, 0755) + + # OE-core / RPM Provides + outprovides = workdir + "/" + srcname + ".provides" + + try: + providesfile = open(outprovides, 'w') + except OSError: + raise bb.build.FuncFailed("unable to open spec file for writing.") + + dump_filerdeps('RPROVIDES', providesfile, d) + + providesfile.close() + os.chmod(outprovides, 0755) + + return (outdepends, outprovides) + + +python write_specfile () { + import oe.packagedata + + # append information for logs and patches to %prep + def add_prep(d,spec_files_bottom): + if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d): + spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) ) + spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"") + spec_files_bottom.append('') + + # append the name of tarball to key word 'SOURCE' in xxx.spec. + def tail_source(d): + if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d): + ar_outdir = d.getVar('ARCHIVER_OUTDIR', True) + if not os.path.exists(ar_outdir): + return + source_list = os.listdir(ar_outdir) + source_number = 0 + for source in source_list: + # The rpmbuild doesn't need the root permission, but it needs + # to know the file's user and group name, the only user and + # group in fakeroot is "root" when working in fakeroot. + f = os.path.join(ar_outdir, source) + os.chown(f, 0, 0) + spec_preamble_top.append('Source%s: %s' % (source_number, source)) + source_number += 1 + # We need a simple way to remove the MLPREFIX from the package name, + # and dependency information... 
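# --- Illustrative aside (editorial, not part of the class): strip_multilib()
# below drops a multilib prefix by consulting MULTILIBS rather than MLPREFIX.
# A standalone sketch of the same transformation, assuming a hypothetical
# MULTILIBS = "multilib:lib32 multilib:lib64":
def strip_multilib_demo(name, multilibs="multilib:lib32 multilib:lib64"):
    for ext in multilibs.split():
        eext = ext.split(':')
        if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
            name = "".join(name.split(eext[1] + '-'))
    return name
assert strip_multilib_demo("lib32-glibc") == "glibc"
assert strip_multilib_demo("packagegroup-base") == "packagegroup-base"
# --- end of aside ---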
+ def strip_multilib(name, d): + multilibs = d.getVar('MULTILIBS', True) or "" + for ext in multilibs.split(): + eext = ext.split(':') + if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0: + name = "".join(name.split(eext[1] + '-')) + return name + + def strip_multilib_deps(deps, d): + depends = bb.utils.explode_dep_versions2(deps or "") + newdeps = {} + for dep in depends: + newdeps[strip_multilib(dep, d)] = depends[dep] + return bb.utils.join_deps(newdeps) + +# ml = d.getVar("MLPREFIX", True) +# if ml and name and len(ml) != 0 and name.find(ml) == 0: +# return ml.join(name.split(ml, 1)[1:]) +# return name + + # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release + # This format is similar to OE, however there are restrictions on the + # characters that can be in a field. In the Version field, "-" + # characters are not allowed. "-" is allowed in the Release field. + # + # We translate the "-" in the version to a "+", by loading the PKGV + # from the dependent recipe, replacing the - with a +, and then using + # that value to do a replace inside of this recipe's dependencies. + # This preserves the "-" separator between the version and release, as + # well as any "-" characters inside of the release field. + # + # All of this has to happen BEFORE the mapping_rename_hook as + # after renaming we cannot look up the dependencies in the packagedata + # store. + def translate_vers(varname, d): + depends = d.getVar(varname, True) + if depends: + depends_dict = bb.utils.explode_dep_versions2(depends) + newdeps_dict = {} + for dep in depends_dict: + verlist = [] + for ver in depends_dict[dep]: + if '-' in ver: + subd = oe.packagedata.read_subpkgdata_dict(dep, d) + if 'PKGV' in subd: + pv = subd['PV'] + pkgv = subd['PKGV'] + reppv = pkgv.replace('-', '+') + ver = ver.replace(pv, reppv).replace(pkgv, reppv) + if 'PKGR' in subd: + # Make sure PKGR rather than PR in ver + pr = '-' + subd['PR'] + pkgr = '-' + subd['PKGR'] + if pkgr not in ver: + ver = ver.replace(pr, pkgr) + verlist.append(ver) + else: + verlist.append(ver) + newdeps_dict[dep] = verlist + depends = bb.utils.join_deps(newdeps_dict) + d.setVar(varname, depends.strip()) + + # We need to change the style the dependency from BB to RPM + # This needs to happen AFTER the mapping_rename_hook + def print_deps(variable, tag, array, d): + depends = variable + if depends: + depends_dict = bb.utils.explode_dep_versions2(depends) + for dep in depends_dict: + for ver in depends_dict[dep]: + ver = ver.replace('(', '') + ver = ver.replace(')', '') + array.append("%s: %s %s" % (tag, dep, ver)) + if not len(depends_dict[dep]): + array.append("%s: %s" % (tag, dep)) + + def walk_files(walkpath, target, conffiles, dirfiles): + # We can race against the ipk/deb backends which create CONTROL or DEBIAN directories + # when packaging. We just ignore these files which are created in + # packages-split/ and not package/ + # We have the odd situation where the CONTROL/DEBIAN directory can be removed in the middle of + # of the walk, the isdir() test would then fail and the walk code would assume its a file + # hence we check for the names in files too. 
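# --- Illustrative aside (editorial): with hypothetical package contents, the
# walk below emits spec %files entries of this shape (the exact set depends on
# the package's CONFFILES and DIRFILES):
#   %dir "/etc"
#   %config "/etc/example.conf"
#   %dir "/usr/bin"
#   "/usr/bin/example"
# --- end of aside ---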
+ for rootpath, dirs, files in os.walk(walkpath): + path = rootpath.replace(walkpath, "") + if path.endswith("DEBIAN") or path.endswith("CONTROL"): + continue + + # Directory handling can happen in two ways, either DIRFILES is not set at all + # in which case we fall back to the older behaviour of packages owning all their + # directories + if dirfiles is None: + for dir in dirs: + if dir == "CONTROL" or dir == "DEBIAN": + continue + # All packages own the directories their files are in... + target.append('%dir "' + path + '/' + dir + '"') + else: + # packages own only empty directories or explict directory. + # This will prevent the overlapping of security permission. + if path and not files and not dirs: + target.append('%dir "' + path + '"') + elif path and path in dirfiles: + target.append('%dir "' + path + '"') + + for file in files: + if file == "CONTROL" or file == "DEBIAN": + continue + if conffiles.count(path + '/' + file): + target.append('%config "' + path + '/' + file + '"') + else: + target.append('"' + path + '/' + file + '"') + + # Prevent the prerm/postrm scripts from being run during an upgrade + def wrap_uninstall(scriptvar): + scr = scriptvar.strip() + if scr.startswith("#!"): + pos = scr.find("\n") + 1 + else: + pos = 0 + scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi' + return scr + + def get_perfile(varname, pkg, d): + deps = [] + dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg + dependsflist = (d.getVar(dependsflist_key, True) or "") + for dfile in dependsflist.split(): + key = "FILE" + varname + "_" + dfile + "_" + pkg + depends = d.getVar(key, True) + if depends: + deps.append(depends) + return " ".join(deps) + + def append_description(spec_preamble, text): + """ + Add the description to the spec file. + """ + import textwrap + dedent_text = textwrap.dedent(text).strip() + # Bitbake saves "\n" as "\\n" + if '\\n' in dedent_text: + for t in dedent_text.split('\\n'): + spec_preamble.append(t.strip()) + else: + spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75)) + + packages = d.getVar('PACKAGES', True) + if not packages or packages == '': + bb.debug(1, "No packages; nothing to do") + return + + pkgdest = d.getVar('PKGDEST', True) + if not pkgdest: + bb.fatal("No PKGDEST") + + outspecfile = d.getVar('OUTSPECFILE', True) + if not outspecfile: + bb.fatal("No OUTSPECFILE") + + # Construct the SPEC file... + srcname = strip_multilib(d.getVar('PN', True), d) + srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".") + srcversion = d.getVar('PKGV', True).replace('-', '+') + srcrelease = d.getVar('PKGR', True) + srcepoch = (d.getVar('PKGE', True) or "") + srclicense = d.getVar('LICENSE', True) + srcsection = d.getVar('SECTION', True) + srcmaintainer = d.getVar('MAINTAINER', True) + srchomepage = d.getVar('HOMEPAGE', True) + srcdescription = d.getVar('DESCRIPTION', True) or "." 
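# --- Illustrative aside (editorial): wrap_uninstall() above guards %preun/%postun
# bodies so they only run on erase; rpm passes the number of package instances
# that will remain as $1, so 0 means removal and 1 means upgrade.  A hypothetical
# pkg_prerm of "update-rc.d -f example remove" is emitted roughly as:
#   if [ "$1" = "0" ] ; then
#   update-rc.d -f example remove
#   fi
# --- end of aside ---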
+ srccustomtagschunk = get_package_additional_metadata("rpm", d) + + srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d) + srcrdepends = [] + srcrrecommends = [] + srcrsuggests = [] + srcrprovides = [] + srcrreplaces = [] + srcrconflicts = [] + srcrobsoletes = [] + + srcrpreinst = [] + srcrpostinst = [] + srcrprerm = [] + srcrpostrm = [] + + spec_preamble_top = [] + spec_preamble_bottom = [] + + spec_scriptlets_top = [] + spec_scriptlets_bottom = [] + + spec_files_top = [] + spec_files_bottom = [] + + perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0" + extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA", True) or "0") == "1" + + for pkg in packages.split(): + localdata = bb.data.createCopy(d) + + root = "%s/%s" % (pkgdest, pkg) + + localdata.setVar('ROOT', '') + localdata.setVar('ROOT_%s' % pkg, root) + pkgname = localdata.getVar('PKG_%s' % pkg, True) + if not pkgname: + pkgname = pkg + localdata.setVar('PKG', pkgname) + + localdata.setVar('OVERRIDES', pkg) + + bb.data.update_data(localdata) + + conffiles = (localdata.getVar('CONFFILES', True) or "").split() + dirfiles = localdata.getVar('DIRFILES', True) + if dirfiles is not None: + dirfiles = dirfiles.split() + + splitname = strip_multilib(pkgname, d) + + splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".") + splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+') + splitrelease = (localdata.getVar('PKGR', True) or "") + splitepoch = (localdata.getVar('PKGE', True) or "") + splitlicense = (localdata.getVar('LICENSE', True) or "") + splitsection = (localdata.getVar('SECTION', True) or "") + splitdescription = (localdata.getVar('DESCRIPTION', True) or ".") + splitcustomtagschunk = get_package_additional_metadata("rpm", localdata) + + translate_vers('RDEPENDS', localdata) + translate_vers('RRECOMMENDS', localdata) + translate_vers('RSUGGESTS', localdata) + translate_vers('RPROVIDES', localdata) + translate_vers('RREPLACES', localdata) + translate_vers('RCONFLICTS', localdata) + + # Map the dependencies into their final form + mapping_rename_hook(localdata) + + splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d) + splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d) + splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d) + splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d) + splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d) + splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d) + splitrobsoletes = [] + + splitrpreinst = localdata.getVar('pkg_preinst', True) + splitrpostinst = localdata.getVar('pkg_postinst', True) + splitrprerm = localdata.getVar('pkg_prerm', True) + splitrpostrm = localdata.getVar('pkg_postrm', True) + + + if not perfiledeps: + # Add in summary of per file dependencies + splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d) + splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d) + + # Gather special src/first package data + if srcname == splitname: + srcrdepends = splitrdepends + srcrrecommends = splitrrecommends + srcrsuggests = splitrsuggests + srcrprovides = splitrprovides + srcrreplaces = splitrreplaces + srcrconflicts = splitrconflicts + + srcrpreinst = splitrpreinst + srcrpostinst = splitrpostinst + srcrprerm = splitrprerm + srcrpostrm = splitrpostrm + + file_list = [] + walk_files(root, file_list, conffiles, dirfiles) + if not file_list and 
localdata.getVar('ALLOW_EMPTY') != "1": + bb.note("Not creating empty RPM package for %s" % splitname) + else: + bb.note("Creating RPM package for %s" % splitname) + spec_files_top.append('%files') + if extra_pkgdata: + package_rpm_extra_pkgdata(splitname, spec_files_top, localdata) + spec_files_top.append('%defattr(-,-,-,-)') + if file_list: + bb.note("Creating RPM package for %s" % splitname) + spec_files_top.extend(file_list) + else: + bb.note("Creating EMPTY RPM Package for %s" % splitname) + spec_files_top.append('') + continue + + # Process subpackage data + spec_preamble_bottom.append('%%package -n %s' % splitname) + spec_preamble_bottom.append('Summary: %s' % splitsummary) + if srcversion != splitversion: + spec_preamble_bottom.append('Version: %s' % splitversion) + if srcrelease != splitrelease: + spec_preamble_bottom.append('Release: %s' % splitrelease) + if srcepoch != splitepoch: + spec_preamble_bottom.append('Epoch: %s' % splitepoch) + if srclicense != splitlicense: + spec_preamble_bottom.append('License: %s' % splitlicense) + spec_preamble_bottom.append('Group: %s' % splitsection) + + if srccustomtagschunk != splitcustomtagschunk: + spec_preamble_bottom.append(splitcustomtagschunk) + + # Replaces == Obsoletes && Provides + robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "") + rprovides = bb.utils.explode_dep_versions2(splitrprovides or "") + rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "") + for dep in rreplaces: + if not dep in robsoletes: + robsoletes[dep] = rreplaces[dep] + if not dep in rprovides: + rprovides[dep] = rreplaces[dep] + splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False) + splitrprovides = bb.utils.join_deps(rprovides, commasep=False) + + print_deps(splitrdepends, "Requires", spec_preamble_bottom, d) + if splitrpreinst: + print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d) + if splitrpostinst: + print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d) + if splitrprerm: + print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d) + if splitrpostrm: + print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d) + + # Suggests in RPM are like recommends in OE-core! + print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d) + # While there is no analog for suggests... (So call them recommends for now) + print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d) + print_deps(splitrprovides, "Provides", spec_preamble_bottom, d) + print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d) + + # conflicts can not be in a provide! We will need to filter it. 
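# --- Illustrative aside (editorial): the Replaces handling above and the
# Conflicts filter below work on the dicts returned by
# bb.utils.explode_dep_versions2().  With hypothetical inputs
#   rreplaces  = {"oldpkg": []}
#   rconflicts = {"oldpkg": [], "otherpkg": []}
# "oldpkg" ends up in both Obsoletes: and Provides:, and is therefore dropped
# from Conflicts:, which keeps only "otherpkg".
# --- end of aside ---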
+ if splitrconflicts: + depends_dict = bb.utils.explode_dep_versions2(splitrconflicts) + newdeps_dict = {} + for dep in depends_dict: + if dep not in splitrprovides: + newdeps_dict[dep] = depends_dict[dep] + if newdeps_dict: + splitrconflicts = bb.utils.join_deps(newdeps_dict) + else: + splitrconflicts = "" + + print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d) + + spec_preamble_bottom.append('') + + spec_preamble_bottom.append('%%description -n %s' % splitname) + append_description(spec_preamble_bottom, splitdescription) + + spec_preamble_bottom.append('') + + # Now process scriptlets + if splitrpreinst: + spec_scriptlets_bottom.append('%%pre -n %s' % splitname) + spec_scriptlets_bottom.append('# %s - preinst' % splitname) + spec_scriptlets_bottom.append(splitrpreinst) + spec_scriptlets_bottom.append('') + if splitrpostinst: + spec_scriptlets_bottom.append('%%post -n %s' % splitname) + spec_scriptlets_bottom.append('# %s - postinst' % splitname) + spec_scriptlets_bottom.append(splitrpostinst) + spec_scriptlets_bottom.append('') + if splitrprerm: + spec_scriptlets_bottom.append('%%preun -n %s' % splitname) + spec_scriptlets_bottom.append('# %s - prerm' % splitname) + scriptvar = wrap_uninstall(splitrprerm) + spec_scriptlets_bottom.append(scriptvar) + spec_scriptlets_bottom.append('') + if splitrpostrm: + spec_scriptlets_bottom.append('%%postun -n %s' % splitname) + spec_scriptlets_bottom.append('# %s - postrm' % splitname) + scriptvar = wrap_uninstall(splitrpostrm) + spec_scriptlets_bottom.append(scriptvar) + spec_scriptlets_bottom.append('') + + # Now process files + file_list = [] + walk_files(root, file_list, conffiles, dirfiles) + if not file_list and localdata.getVar('ALLOW_EMPTY') != "1": + bb.note("Not creating empty RPM package for %s" % splitname) + else: + spec_files_bottom.append('%%files -n %s' % splitname) + if extra_pkgdata: + package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata) + spec_files_bottom.append('%defattr(-,-,-,-)') + if file_list: + bb.note("Creating RPM package for %s" % splitname) + spec_files_bottom.extend(file_list) + else: + bb.note("Creating EMPTY RPM Package for %s" % splitname) + spec_files_bottom.append('') + + del localdata + + add_prep(d,spec_files_bottom) + spec_preamble_top.append('Summary: %s' % srcsummary) + spec_preamble_top.append('Name: %s' % srcname) + spec_preamble_top.append('Version: %s' % srcversion) + spec_preamble_top.append('Release: %s' % srcrelease) + if srcepoch and srcepoch.strip() != "": + spec_preamble_top.append('Epoch: %s' % srcepoch) + spec_preamble_top.append('License: %s' % srclicense) + spec_preamble_top.append('Group: %s' % srcsection) + spec_preamble_top.append('Packager: %s' % srcmaintainer) + if srchomepage: + spec_preamble_top.append('URL: %s' % srchomepage) + if srccustomtagschunk: + spec_preamble_top.append(srccustomtagschunk) + tail_source(d) + + # Replaces == Obsoletes && Provides + robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "") + rprovides = bb.utils.explode_dep_versions2(srcrprovides or "") + rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "") + for dep in rreplaces: + if not dep in robsoletes: + robsoletes[dep] = rreplaces[dep] + if not dep in rprovides: + rprovides[dep] = rreplaces[dep] + srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False) + srcrprovides = bb.utils.join_deps(rprovides, commasep=False) + + print_deps(srcdepends, "BuildRequires", spec_preamble_top, d) + print_deps(srcrdepends, "Requires", spec_preamble_top, d) + if srcrpreinst: + 
print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d) + if srcrpostinst: + print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d) + if srcrprerm: + print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d) + if srcrpostrm: + print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d) + + # Suggests in RPM are like recommends in OE-core! + print_deps(srcrrecommends, "Suggests", spec_preamble_top, d) + # While there is no analog for suggests... (So call them recommends for now) + print_deps(srcrsuggests, "Recommends", spec_preamble_top, d) + print_deps(srcrprovides, "Provides", spec_preamble_top, d) + print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d) + + # conflicts can not be in a provide! We will need to filter it. + if srcrconflicts: + depends_dict = bb.utils.explode_dep_versions2(srcrconflicts) + newdeps_dict = {} + for dep in depends_dict: + if dep not in srcrprovides: + newdeps_dict[dep] = depends_dict[dep] + if newdeps_dict: + srcrconflicts = bb.utils.join_deps(newdeps_dict) + else: + srcrconflicts = "" + + print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d) + + spec_preamble_top.append('') + + spec_preamble_top.append('%description') + append_description(spec_preamble_top, srcdescription) + + spec_preamble_top.append('') + + if srcrpreinst: + spec_scriptlets_top.append('%pre') + spec_scriptlets_top.append('# %s - preinst' % srcname) + spec_scriptlets_top.append(srcrpreinst) + spec_scriptlets_top.append('') + if srcrpostinst: + spec_scriptlets_top.append('%post') + spec_scriptlets_top.append('# %s - postinst' % srcname) + spec_scriptlets_top.append(srcrpostinst) + spec_scriptlets_top.append('') + if srcrprerm: + spec_scriptlets_top.append('%preun') + spec_scriptlets_top.append('# %s - prerm' % srcname) + scriptvar = wrap_uninstall(srcrprerm) + spec_scriptlets_top.append(scriptvar) + spec_scriptlets_top.append('') + if srcrpostrm: + spec_scriptlets_top.append('%postun') + spec_scriptlets_top.append('# %s - postrm' % srcname) + scriptvar = wrap_uninstall(srcrpostrm) + spec_scriptlets_top.append(scriptvar) + spec_scriptlets_top.append('') + + # Write the SPEC file + try: + specfile = open(outspecfile, 'w') + except OSError: + raise bb.build.FuncFailed("unable to open spec file for writing.") + + # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top + # of the generated spec file + external_preamble = d.getVar("RPMSPEC_PREAMBLE", True) + if external_preamble: + specfile.write(external_preamble + "\n") + + for line in spec_preamble_top: + specfile.write(line + "\n") + + for line in spec_preamble_bottom: + specfile.write(line + "\n") + + for line in spec_scriptlets_top: + specfile.write(line + "\n") + + for line in spec_scriptlets_bottom: + specfile.write(line + "\n") + + for line in spec_files_top: + specfile.write(line + "\n") + + for line in spec_files_bottom: + specfile.write(line + "\n") + + specfile.close() +} + +python do_package_rpm () { + # We need a simple way to remove the MLPREFIX from the package name, + # and dependency information... 
+ def strip_multilib(name, d): + ml = d.getVar("MLPREFIX", True) + if ml and name and len(ml) != 0 and name.find(ml) >= 0: + return "".join(name.split(ml)) + return name + + workdir = d.getVar('WORKDIR', True) + tmpdir = d.getVar('TMPDIR', True) + pkgd = d.getVar('PKGD', True) + pkgdest = d.getVar('PKGDEST', True) + if not workdir or not pkgd or not tmpdir: + bb.error("Variables incorrectly set, unable to package") + return + + packages = d.getVar('PACKAGES', True) + if not packages or packages == '': + bb.debug(1, "No packages; nothing to do") + return + + # Construct the spec file... + # If the spec file already exist, and has not been stored into + # pseudo's files.db, it maybe cause rpmbuild src.rpm fail, + # so remove it before doing rpmbuild src.rpm. + srcname = strip_multilib(d.getVar('PN', True), d) + outspecfile = workdir + "/" + srcname + ".spec" + if os.path.isfile(outspecfile): + os.remove(outspecfile) + d.setVar('OUTSPECFILE', outspecfile) + bb.build.exec_func('write_specfile', d) + + perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0" + if perfiledeps: + outdepends, outprovides = write_rpm_perfiledata(srcname, d) + + # Setup the rpmbuild arguments... + rpmbuild = d.getVar('RPMBUILD', True) + targetsys = d.getVar('TARGET_SYS', True) + targetvendor = d.getVar('HOST_VENDOR', True) + package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_") + sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX', True) or "nativesdk").replace("-", "_") + if package_arch not in "all any noarch".split() and not package_arch.endswith(sdkpkgsuffix): + ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_") + d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch) + else: + d.setVar('PACKAGE_ARCH_EXTEND', package_arch) + pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}') + pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}') + magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc') + bb.utils.mkdirhier(pkgwritedir) + os.chmod(pkgwritedir, 0755) + + cmd = rpmbuild + cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd + cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'" + cmd = cmd + " --define '_builddir " + d.getVar('S', True) + "'" + cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'" + cmd = cmd + " --define '_use_internal_dependency_generator 0'" + if perfiledeps: + cmd = cmd + " --define '__find_requires " + outdepends + "'" + cmd = cmd + " --define '__find_provides " + outprovides + "'" + else: + cmd = cmd + " --define '__find_requires %{nil}'" + cmd = cmd + " --define '__find_provides %{nil}'" + cmd = cmd + " --define '_unpackaged_files_terminate_build 0'" + cmd = cmd + " --define 'debug_package %{nil}'" + cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'" + cmd = cmd + " --define '_tmppath " + workdir + "'" + if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d): + cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR', True) + "'" + cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR', True) + "'" + cmdsrpm = cmdsrpm + " -bs " + outspecfile + # Build the .src.rpm + d.setVar('SBUILDSPEC', cmdsrpm + "\n") + d.setVarFlag('SBUILDSPEC', 'func', '1') + bb.build.exec_func('SBUILDSPEC', d) + cmd = cmd + " -bb " + outspecfile + + # Build the rpm package! 
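# --- Illustrative aside (editorial): with hypothetical paths, the command
# assembled above expands to roughly
#   rpmbuild --nodeps --short-circuit --target armv5te-poky-linux-gnueabi \
#            --buildroot .../example/1.0-r0/package \
#            --define '_topdir .../example/1.0-r0' \
#            --define '_rpmdir .../deploy-rpms/armv5te' \
#            ... -bb example.spec
# and is then run as a shell function through BUILDSPEC/bb.build.exec_func()
# just below.
# --- end of aside ---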
+ d.setVar('BUILDSPEC', cmd + "\n") + d.setVarFlag('BUILDSPEC', 'func', '1') + bb.build.exec_func('BUILDSPEC', d) +} + +python () { + if d.getVar('PACKAGES', True) != '': + deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot' + d.appendVarFlag('do_package_write_rpm', 'depends', deps) + d.setVarFlag('do_package_write_rpm', 'fakeroot', 1) +} + +SSTATETASKS += "do_package_write_rpm" +do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}" +do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}" +# Take a shared lock, we can write multiple packages at the same time... +# but we need to stop the rootfs/solver from running while we do... +do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock" + +python do_package_write_rpm_setscene () { + sstate_setscene(d) +} +addtask do_package_write_rpm_setscene + +python do_package_write_rpm () { + bb.build.exec_func("read_subpackage_metadata", d) + bb.build.exec_func("do_package_rpm", d) +} + +do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}" +do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}" +do_package_write_rpm[umask] = "022" +addtask package_write_rpm after do_packagedata do_package + +PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot" +PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot" + +do_build[recrdeptask] += "do_package_write_rpm" diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass new file mode 100644 index 0000000000..fed2c28b69 --- /dev/null +++ b/meta/classes/package_tar.bbclass @@ -0,0 +1,69 @@ +inherit package + +IMAGE_PKGTYPE ?= "tar" + +python do_package_tar () { + import subprocess + workdir = d.getVar('WORKDIR', True) + if not workdir: + bb.error("WORKDIR not defined, unable to package") + return + + outdir = d.getVar('DEPLOY_DIR_TAR', True) + if not outdir: + bb.error("DEPLOY_DIR_TAR not defined, unable to package") + return + + dvar = d.getVar('D', True) + if not dvar: + bb.error("D not defined, unable to package") + return + + packages = d.getVar('PACKAGES', True) + if not packages: + bb.debug(1, "PACKAGES not defined, nothing to package") + return + + pkgdest = d.getVar('PKGDEST', True) + + bb.utils.mkdirhier(outdir) + bb.utils.mkdirhier(dvar) + + for pkg in packages.split(): + localdata = bb.data.createCopy(d) + root = "%s/%s" % (pkgdest, pkg) + + overrides = localdata.getVar('OVERRIDES') + localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg)) + bb.data.update_data(localdata) + + bb.utils.mkdirhier(root) + basedir = os.path.dirname(root) + tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz") + os.chdir(root) + dlist = os.listdir(root) + if not dlist: + bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True))) + continue + args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split() + ret = subprocess.call(args + [tarfn] + dlist) + if ret != 0: + bb.error("Creation of tar %s failed." 
% tarfn) +} + +python () { + if d.getVar('PACKAGES', True) != '': + deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split() + deps.append('tar-native:do_populate_sysroot') + deps.append('virtual/fakeroot-native:do_populate_sysroot') + d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps)) + d.setVarFlag('do_package_write_tar', 'fakeroot', "1") +} + + +python do_package_write_tar () { + bb.build.exec_func("read_subpackage_metadata", d) + bb.build.exec_func("do_package_tar", d) +} +do_package_write_tar[dirs] = "${D}" +addtask package_write_tar before do_build after do_packagedata do_package diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass new file mode 100644 index 0000000000..d1aedf2289 --- /dev/null +++ b/meta/classes/packagedata.bbclass @@ -0,0 +1,26 @@ +python read_subpackage_metadata () { + import oe.packagedata + + vars = { + "PN" : d.getVar('PN', True), + "PE" : d.getVar('PE', True), + "PV" : d.getVar('PV', True), + "PR" : d.getVar('PR', True), + } + + data = oe.packagedata.read_pkgdata(vars["PN"], d) + + for key in data.keys(): + d.setVar(key, data[key]) + + for pkg in d.getVar('PACKAGES', True).split(): + sdata = oe.packagedata.read_subpkgdata(pkg, d) + for key in sdata.keys(): + if key in vars: + if sdata[key] != vars[key]: + if key == "PN": + bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key])) + bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key])) + continue + d.setVar(key, sdata[key]) +} diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass new file mode 100644 index 0000000000..56cfead82a --- /dev/null +++ b/meta/classes/packagegroup.bbclass @@ -0,0 +1,52 @@ +# Class for packagegroup (package group) recipes + +# By default, only the packagegroup package itself is in PACKAGES. +# -dbg and -dev flavours are handled by the anonfunc below. +# This means that packagegroup recipes used to build multiple packagegroup +# packages have to modify PACKAGES after inheriting packagegroup.bbclass. +PACKAGES = "${PN}" + +# By default, packagegroup packages do not depend on a certain architecture. +# Only if dependencies are modified by MACHINE_FEATURES, packages +# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass +PACKAGE_ARCH ?= "all" + +# Fully expanded - so it applies the overrides as well +PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}" + +inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')} + +# This automatically adds -dbg and -dev flavours of all PACKAGES +# to the list. Their dependencies (RRECOMMENDS) are handled as usual +# by package_depchains in a following step. 
+# Also mark all packages as ALLOW_EMPTY +python () { + packages = d.getVar('PACKAGES', True).split() + genpackages = [] + for pkg in packages: + d.setVar("ALLOW_EMPTY_%s" % pkg, "1") + for postfix in ['-dbg', '-dev', '-ptest']: + genpackages.append(pkg+postfix) + if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1': + d.setVar('PACKAGES', ' '.join(packages+genpackages)) +} + +# We don't want to look at shared library dependencies for the +# dbg packages +DEPCHAIN_DBGDEFAULTDEPS = "1" + +# We only need the packaging tasks - disable the rest +do_fetch[noexec] = "1" +do_unpack[noexec] = "1" +do_patch[noexec] = "1" +do_configure[noexec] = "1" +do_compile[noexec] = "1" +do_install[noexec] = "1" +do_populate_sysroot[noexec] = "1" + +python () { + initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True) + if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d): + bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman) +} + diff --git a/meta/classes/packageinfo.bbclass b/meta/classes/packageinfo.bbclass new file mode 100644 index 0000000000..7d60ace1dc --- /dev/null +++ b/meta/classes/packageinfo.bbclass @@ -0,0 +1,22 @@ +python packageinfo_handler () { + import oe.packagedata + pkginfolist = [] + + pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + '/runtime/' + if os.path.exists(pkgdata_dir): + for root, dirs, files in os.walk(pkgdata_dir): + for pkgname in files: + if pkgname.endswith('.packaged'): + pkgname = pkgname[:-9] + pkgdatafile = root + pkgname + try: + sdata = oe.packagedata.read_pkgdatafile(pkgdatafile) + sdata['PKG'] = pkgname + pkginfolist.append(sdata) + except Exception as e: + bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, e.__class__, str(e))) + bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data) +} + +addhandler packageinfo_handler +packageinfo_handler[eventmask] = "bb.event.RequestPackageInfo" diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass new file mode 100644 index 0000000000..1e2aab0418 --- /dev/null +++ b/meta/classes/patch.bbclass @@ -0,0 +1,188 @@ +# Copyright (C) 2006 OpenedHand LTD + +# Point to an empty file so any user's custom settings don't break things +QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc" + +PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot" + +inherit terminal + +def src_patches(d, all = False ): + workdir = d.getVar('WORKDIR', True) + fetch = bb.fetch2.Fetch([], d) + patches = [] + sources = [] + for url in fetch.urls: + local = patch_path(url, fetch, workdir) + if not local: + if all: + local = fetch.localpath(url) + sources.append(local) + continue + + urldata = fetch.ud[url] + parm = urldata.parm + patchname = parm.get('pname') or os.path.basename(local) + + apply, reason = should_apply(parm, d) + if not apply: + if reason: + bb.note("Patch %s %s" % (patchname, reason)) + continue + + patchparm = {'patchname': patchname} + if "striplevel" in parm: + striplevel = parm["striplevel"] + elif "pnum" in parm: + #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url) + striplevel = parm["pnum"] + else: + striplevel = '1' + patchparm['striplevel'] = striplevel + + patchdir = parm.get('patchdir') + if patchdir: + patchparm['patchdir'] = patchdir + + localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm)) + patches.append(localurl) + + if all: + return sources + + return patches + +def 
patch_path(url, fetch, workdir): + """Return the local path of a patch, or None if this isn't a patch""" + + local = fetch.localpath(url) + base, ext = os.path.splitext(os.path.basename(local)) + if ext in ('.gz', '.bz2', '.Z'): + local = os.path.join(workdir, base) + ext = os.path.splitext(base)[1] + + urldata = fetch.ud[url] + if "apply" in urldata.parm: + apply = oe.types.boolean(urldata.parm["apply"]) + if not apply: + return + elif ext not in (".diff", ".patch"): + return + + return local + +def should_apply(parm, d): + """Determine if we should apply the given patch""" + + if "mindate" in parm or "maxdate" in parm: + pn = d.getVar('PN', True) + srcdate = d.getVar('SRCDATE_%s' % pn, True) + if not srcdate: + srcdate = d.getVar('SRCDATE', True) + + if srcdate == "now": + srcdate = d.getVar('DATE', True) + + if "maxdate" in parm and parm["maxdate"] < srcdate: + return False, 'is outdated' + + if "mindate" in parm and parm["mindate"] > srcdate: + return False, 'is predated' + + + if "minrev" in parm: + srcrev = d.getVar('SRCREV', True) + if srcrev and srcrev < parm["minrev"]: + return False, 'applies to later revisions' + + if "maxrev" in parm: + srcrev = d.getVar('SRCREV', True) + if srcrev and srcrev > parm["maxrev"]: + return False, 'applies to earlier revisions' + + if "rev" in parm: + srcrev = d.getVar('SRCREV', True) + if srcrev and parm["rev"] not in srcrev: + return False, "doesn't apply to revision" + + if "notrev" in parm: + srcrev = d.getVar('SRCREV', True) + if srcrev and parm["notrev"] in srcrev: + return False, "doesn't apply to revision" + + return True, None + +should_apply[vardepsexclude] = "DATE SRCDATE" + +python patch_do_patch() { + import oe.patch + + patchsetmap = { + "patch": oe.patch.PatchTree, + "quilt": oe.patch.QuiltTree, + "git": oe.patch.GitApplyTree, + } + + cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt'] + + resolvermap = { + "noop": oe.patch.NOOPResolver, + "user": oe.patch.UserResolver, + } + + rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user'] + + classes = {} + + s = d.getVar('S', True) + + path = os.getenv('PATH') + os.putenv('PATH', d.getVar('PATH', True)) + + # We must use one TMPDIR per process so that the "patch" processes + # don't generate the same temp file name. 
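# --- Illustrative aside (editorial): src_patches()/should_apply() above honour
# per-entry SRC_URI parameters.  For example, a hypothetical recipe line
#   SRC_URI += "file://fix-build.patch;striplevel=2;patchdir=src"
# is applied with strip level 2 inside ${S}/src, while
#   SRC_URI += "file://optional.diff;apply=no"
# is fetched but not treated as a patch.  The loop below applies each entry
# with the tool selected by PATCHTOOL (quilt when unset).
# --- end of aside ---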
+ + import tempfile + process_tmpdir = tempfile.mkdtemp() + os.environ['TMPDIR'] = process_tmpdir + + for patch in src_patches(d): + _, _, local, _, _, parm = bb.fetch.decodeurl(patch) + + if "patchdir" in parm: + patchdir = parm["patchdir"] + if not os.path.isabs(patchdir): + patchdir = os.path.join(s, patchdir) + else: + patchdir = s + + if not patchdir in classes: + patchset = cls(patchdir, d) + resolver = rcls(patchset, oe_terminal) + classes[patchdir] = (patchset, resolver) + patchset.Clean() + else: + patchset, resolver = classes[patchdir] + + bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d))) + try: + patchset.Import({"file":local, "strippath": parm['striplevel']}, True) + except Exception as exc: + bb.utils.remove(process_tmpdir, True) + bb.fatal(str(exc)) + try: + resolver.Resolve() + except bb.BBHandledException as e: + bb.utils.remove(process_tmpdir, True) + bb.fatal(str(e)) + + bb.utils.remove(process_tmpdir, True) + del os.environ['TMPDIR'] +} +patch_do_patch[vardepsexclude] = "PATCHRESOLVE" + +addtask patch after do_unpack +do_patch[dirs] = "${WORKDIR}" +do_patch[depends] = "${PATCHDEPENDENCY}" + +EXPORT_FUNCTIONS do_patch diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass new file mode 100644 index 0000000000..cc8de8b381 --- /dev/null +++ b/meta/classes/perlnative.bbclass @@ -0,0 +1,3 @@ +EXTRANATIVEPATH += "perl-native" +DEPENDS += "perl-native" +OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native" diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass new file mode 100644 index 0000000000..b8d75bd38c --- /dev/null +++ b/meta/classes/pixbufcache.bbclass @@ -0,0 +1,72 @@ +# +# This class will generate the proper postinst/postrm scriptlets for pixbuf +# packages. +# + +DEPENDS += "qemu-native" +inherit qemu + +PIXBUF_PACKAGES ??= "${PN}" + +pixbufcache_common() { +if [ "x$D" != "x" ]; then + $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \ + bindir=${bindir} base_libdir=${base_libdir} +else + + # Update the pixbuf loaders in case they haven't been registered yet + GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache + + if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then + for icondir in /usr/share/icons/*; do + if [ -d ${icondir} ]; then + gtk-update-icon-cache -t -q ${icondir} + fi + done + fi +fi +} + +python populate_packages_append() { + pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split() + + for pkg in pixbuf_pkgs: + bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg) + postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += d.getVar('pixbufcache_common', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += d.getVar('pixbufcache_common', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) +} + +# +# Add an sstate postinst hook to update the cache for native packages. +# An error exit during populate_sysroot_setscene allows bitbake to +# try to recover by re-building the package. 
+# +SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst" + +pixbufcache_sstate_postinst() { + if [ "${BB_CURRENTTASK}" = "populate_sysroot" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ] + then + GDK_PIXBUF_FATAL_LOADER=1 gdk-pixbuf-query-loaders --update-cache || exit 1 + fi +} + +# Add all of the dependencies of gdk-pixbuf as dependencies of +# do_populate_sysroot_setscene so that pixbufcache_sstate_postinst can work +# (otherwise gdk-pixbuf-query-loaders may not exist or link). Only add +# gdk-pixbuf-native if we're not building gdk-pixbuf itself. +# +# Packages that use this class should extend this variable with their runtime +# dependencies. +PIXBUFCACHE_SYSROOT_DEPS = "" +PIXBUFCACHE_SYSROOT_DEPS_class-native = "${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene" +do_populate_sysroot_setscene[depends] += "${PIXBUFCACHE_SYSROOT_DEPS}" +do_populate_sysroot[depends] += "${@d.getVar('PIXBUFCACHE_SYSROOT_DEPS', True).replace('_setscene','')}" diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass new file mode 100644 index 0000000000..ad1f84f506 --- /dev/null +++ b/meta/classes/pkgconfig.bbclass @@ -0,0 +1,2 @@ +DEPENDS_prepend = "pkgconfig-native " + diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass new file mode 100644 index 0000000000..f64a911b72 --- /dev/null +++ b/meta/classes/populate_sdk.bbclass @@ -0,0 +1,7 @@ +# The majority of populate_sdk is located in populate_sdk_base +# This chunk simply facilitates compatibility with SDK only recipes. + +inherit populate_sdk_base + +addtask populate_sdk after do_install before do_build + diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass new file mode 100644 index 0000000000..de72e32ed8 --- /dev/null +++ b/meta/classes/populate_sdk_base.bbclass @@ -0,0 +1,164 @@ +inherit meta toolchain-scripts + +# Wildcards specifying complementary packages to install for every package that has been explicitly +# installed into the rootfs +COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev' +COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev' +COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc' +COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg' +COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest' + +def complementary_globs(featurevar, d): + all_globs = d.getVarFlags('COMPLEMENTARY_GLOB') + globs = [] + features = set((d.getVar(featurevar, True) or '').split()) + for name, glob in all_globs.items(): + if name in features: + globs.append(glob) + return ' '.join(globs) + +SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs" +SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}' + +inherit rootfs_${IMAGE_PKGTYPE} + +SDK_DIR = "${WORKDIR}/sdk" +SDK_OUTPUT = "${SDK_DIR}/image" +SDK_DEPLOY = "${DEPLOY_DIR}/sdk" + +B_task-populate-sdk = "${SDK_DIR}" + +SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}" + +TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}" +TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= "" +TOOLCHAIN_TARGET_TASK ?= " \ + ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \ + ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target-dbg')} \ + " +TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= "" +TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}" + +SDK_RDEPENDS = 
"${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}" +SDK_DEPENDS = "virtual/fakeroot-native sed-native" + +# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it +# could be set to the MACHINE_ARCH +REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}" + +PID = "${@os.getpid()}" + +EXCLUDE_FROM_WORLD = "1" + +SDK_PACKAGING_FUNC ?= "create_shar" +SDK_POST_INSTALL_COMMAND ?= "" + +SDK_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.manifest" +python write_target_sdk_manifest () { + from oe.sdk import sdk_list_installed_packages + sdkmanifestdir = os.path.dirname(d.getVar("SDK_MANIFEST", True)) + if not os.path.exists(sdkmanifestdir): + bb.utils.mkdirhier(sdkmanifestdir) + with open(d.getVar('SDK_MANIFEST', True), 'w') as output: + output.write(sdk_list_installed_packages(d, True, 'ver')) +} + +POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; " + +fakeroot python do_populate_sdk() { + from oe.sdk import populate_sdk + from oe.manifest import create_manifest, Manifest + + pn = d.getVar('PN', True) + runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d) + runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d) + + ld = bb.data.createCopy(d) + ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata") + runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld) + runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld) + d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK", True)) + d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", True)) + + # create target/host SDK manifests + create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True), + manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST) + create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True), + manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET) + + populate_sdk(d) + + # Process DEFAULTTUNE + bb.build.exec_func("create_sdk_files", d) + + bb.build.exec_func("tar_sdk", d) + + bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d) +} + +fakeroot create_sdk_files() { + cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/ + + # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern. + # Escape special characters like '+' and '.' in the SDKPATH + escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g") + sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py +} + +SDKTAROPTS = "--owner=root --group=root -j" + +fakeroot tar_sdk() { + # Package it up + mkdir -p ${SDK_DEPLOY} + cd ${SDK_OUTPUT}/${SDKPATH} + tar ${SDKTAROPTS} -c --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 . 
+} + +fakeroot create_shar() { + # copy in the template shar extractor script + cp ${COREBASE}/meta/files/toolchain-shar-template.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + + cat << "EOF" > ${T}/post_install_command +${SDK_POST_INSTALL_COMMAND} +EOF + sed -i -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + + # substitute variables + sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \ + -e 's#@SDKPATH@#${SDKPATH}#g' \ + -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \ + -e '/@SDK_POST_INSTALL_COMMAND@/d' \ + ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + + # add execution permission + chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + + # append the SDK tarball + cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + + # delete the old tarball, we don't need it anymore + rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 +} + +populate_sdk_log_check() { + for target in $* + do + lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}" + + echo "log_check: Using $lf_path as logfile" + + if test -e "$lf_path" + then + ${IMAGE_PKGTYPE}_log_check $target $lf_path + else + echo "Cannot find logfile [$lf_path]" + fi + echo "Logfile is clean" + done +} + +do_populate_sdk[dirs] = "${TOPDIR}" +do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}" +do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}" +do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb" +addtask populate_sdk diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass new file mode 100644 index 0000000000..5a1cb33c6a --- /dev/null +++ b/meta/classes/prexport.bbclass @@ -0,0 +1,58 @@ +PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}" +PRSERV_DUMPOPT_PKGARCH = "" +PRSERV_DUMPOPT_CHECKSUM = "" +PRSERV_DUMPOPT_COL = "0" + +PRSERV_DUMPDIR ??= "${LOG_DIR}/db" +PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc" + +python prexport_handler () { + import bb.event + if not e.data: + return + + if isinstance(e, bb.event.RecipeParsed): + import oe.prservice + #get all PR values for the current PRAUTOINX + ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True) + ver = ver.replace('%','-') + retval = oe.prservice.prserv_dump_db(e.data) + if not retval: + bb.fatal("prexport_handler: export failed!") + (metainfo, datainfo) = retval + if not datainfo: + bb.warn("prexport_handler: No AUTOPR values found for %s" % ver) + return + oe.prservice.prserv_export_tofile(e.data, None, datainfo, False) + if 'AUTOINC' in ver: + import re + srcpv = bb.fetch2.get_srcrev(e.data) + base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)] + e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver) + retval = oe.prservice.prserv_dump_db(e.data) + if not retval: + bb.fatal("prexport_handler: export failed!") + (metainfo, datainfo) = retval + oe.prservice.prserv_export_tofile(e.data, None, datainfo, False) + elif isinstance(e, bb.event.ParseStarted): + import bb.utils + import oe.prservice + oe.prservice.prserv_check_avail(e.data) + #remove dumpfile + bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True)) + elif isinstance(e, bb.event.ParseCompleted): + import oe.prservice + #dump meta info of tables + d = e.data.createCopy() + d.setVar('PRSERV_DUMPOPT_COL', "1") + retval = oe.prservice.prserv_dump_db(d) + if not retval: + bb.error("prexport_handler: 
export failed!") + return + (metainfo, datainfo) = retval + oe.prservice.prserv_export_tofile(d, metainfo, None, True) + +} + +addhandler prexport_handler +prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted" diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass new file mode 100644 index 0000000000..8ed45f03f0 --- /dev/null +++ b/meta/classes/primport.bbclass @@ -0,0 +1,21 @@ +python primport_handler () { + import bb.event + if not e.data: + return + + if isinstance(e, bb.event.ParseCompleted): + import oe.prservice + #import all exported AUTOPR values + imported = oe.prservice.prserv_import_db(e.data) + if imported is None: + bb.fatal("import failed!") + + for (version, pkgarch, checksum, value) in imported: + bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value)) + elif isinstance(e, bb.event.ParseStarted): + import oe.prservice + oe.prservice.prserv_check_avail(e.data) +} + +addhandler primport_handler +primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted" diff --git a/meta/classes/prserv.bbclass b/meta/classes/prserv.bbclass new file mode 100644 index 0000000000..139597f9cb --- /dev/null +++ b/meta/classes/prserv.bbclass @@ -0,0 +1,2 @@ + + diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass new file mode 100644 index 0000000000..b2949af9bb --- /dev/null +++ b/meta/classes/ptest-gnome.bbclass @@ -0,0 +1,8 @@ +inherit ptest + +EXTRA_OECONF_append_class-target = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}" + +FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \ + ${datadir}/installed-tests/" + +RDEPENDS_${PN}-ptest += "gnome-desktop-testing" diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass new file mode 100644 index 0000000000..4e6f075efe --- /dev/null +++ b/meta/classes/ptest.bbclass @@ -0,0 +1,62 @@ +SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files" +DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \ +This package contains a test directory ${PTEST_PATH} for package test purposes." 
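As an illustrative aside, not part of the committed class: a minimal sketch of how a recipe would typically hook into the ptest machinery added here. The recipe fragment, the test binary name and the run-ptest script are hypothetical; inherit ptest, do_install_ptest, PTEST_PATH and the DISTRO_FEATURES gate are the hooks this class defines.

    # hypothetical recipe fragment
    inherit ptest
    SRC_URI += "file://run-ptest"

    do_install_ptest () {
        # install an assumed test binary alongside the run-ptest script
        install -m 0755 ${B}/tests/foo-selftest ${D}${PTEST_PATH}/
    }

    # and in conf/local.conf, to switch PTEST_ENABLED to "1":
    DISTRO_FEATURES_append = " ptest"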
+ +PTEST_PATH ?= "${libdir}/${PN}/ptest" +FILES_${PN}-ptest = "${PTEST_PATH}" +SECTION_${PN}-ptest = "devel" +ALLOW_EMPTY_${PN}-ptest = "1" +PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}" +PTEST_ENABLED_class-native = "" +PTEST_ENABLED_class-nativesdk = "" +PTEST_ENABLED_class-cross-canadian = "" +RDEPENDS_${PN}-ptest_class-native = "" +RDEPENDS_${PN}-ptest_class-nativesdk = "" + +PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}" + +do_configure_ptest() { + : +} + +do_configure_ptest_base() { + do_configure_ptest +} + +do_compile_ptest() { + : +} + +do_compile_ptest_base() { + do_compile_ptest +} + +do_install_ptest() { + : +} + +do_install_ptest_base() { + if [ -f ${WORKDIR}/run-ptest ]; then + install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest + if grep -q install-ptest: Makefile; then + oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest + fi + do_install_ptest + fi +} + +do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}" + +addtask configure_ptest_base after do_configure before do_compile +addtask compile_ptest_base after do_compile before do_install +addtask install_ptest_base after do_install before do_package do_populate_sysroot + +python () { + if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d): + d.setVarFlag('do_install_ptest_base', 'fakeroot', 1) + + # Remove all '*ptest_base' tasks when ptest is not enabled + if not(d.getVar('PTEST_ENABLED', True) == "1"): + for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']: + bb.build.deltask(i, d) +} diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass new file mode 100644 index 0000000000..ebfa4b30f6 --- /dev/null +++ b/meta/classes/python-dir.bbclass @@ -0,0 +1,5 @@ +PYTHON_BASEVERSION ?= "2.7" +PYTHON_ABI ?= "" +PYTHON_DIR = "python${PYTHON_BASEVERSION}" +PYTHON_PN = "python${@'' if '${PYTHON_BASEVERSION}'.startswith('2') else '3'}" +PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages" diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass new file mode 100644 index 0000000000..f86374fd33 --- /dev/null +++ b/meta/classes/python3native.bbclass @@ -0,0 +1,7 @@ +PYTHON_BASEVERSION = "3.3" + +inherit python-dir + +PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}" +EXTRANATIVEPATH += "${PYTHON_PN}-native" +DEPENDS += " ${PYTHON_PN}-native " diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass new file mode 100644 index 0000000000..fdd22bbc86 --- /dev/null +++ b/meta/classes/pythonnative.bbclass @@ -0,0 +1,6 @@ + +inherit python-dir + +PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}" +EXTRANATIVEPATH += "${PYTHON_PN}-native" +DEPENDS += " ${PYTHON_PN}-native " diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass new file mode 100644 index 0000000000..b2cf85d628 --- /dev/null +++ b/meta/classes/qemu.bbclass @@ -0,0 +1,48 @@ +# +# This class contains functions for recipes that need QEMU or test for its +# existence. +# + +def qemu_target_binary(data): + target_arch = data.getVar("TARGET_ARCH", True) + if target_arch in ("i486", "i586", "i686"): + target_arch = "i386" + elif target_arch == "powerpc": + target_arch = "ppc" + elif target_arch == "powerpc64": + target_arch = "ppc64" + + return "qemu-" + target_arch +# +# Next function will return a string containing the command that is needed to +# to run a certain binary through qemu. 
For example, in order to make a certain +# postinstall scriptlet run at do_rootfs time and running the postinstall is +# architecture dependent, we can run it through qemu. For example, in the +# postinstall scriptlet, we could use the following: +# +# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments] +# +def qemu_run_binary(data, rootfs_path, binary): + qemu_binary = qemu_target_binary(data) + if qemu_binary == "qemu-allarch": + qemu_binary = "qemuwrapper" + + libdir = rootfs_path + data.getVar("libdir", False) + base_libdir = rootfs_path + data.getVar("base_libdir", False) + oldest_kernel = data.getVar("OLDEST_KERNEL", True) + + return "PSEUDO_UNLOAD=1 " + qemu_binary + " -r " + oldest_kernel + " -L " + rootfs_path\ + + " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\ + + rootfs_path + binary + +QEMU_OPTIONS = "-r ${OLDEST_KERNEL}" +QEMU_OPTIONS_append_iwmmxt = " -cpu pxa270-c5" +QEMU_OPTIONS_append_armv6 = " -cpu arm1136" +QEMU_OPTIONS_append_armv7a = " -cpu cortex-a8" +QEMU_OPTIONS_append_e500v2 = " -cpu e500v2" +QEMU_OPTIONS_append_e500mc = " -cpu e500mc" +QEMU_OPTIONS_append_e5500 = " -cpu e5500" +QEMU_OPTIONS_append_e5500-64b = " -cpu e5500" +QEMU_OPTIONS_append_e6500 = " -cpu e6500" +QEMU_OPTIONS_append_e6500-64b = " -cpu e6500" +QEMU_OPTIONS_append_ppc7400 = " -cpu 7400" diff --git a/meta/classes/qmake2.bbclass b/meta/classes/qmake2.bbclass new file mode 100644 index 0000000000..6e73ad2d1e --- /dev/null +++ b/meta/classes/qmake2.bbclass @@ -0,0 +1,27 @@ +# +# QMake variables for Qt4 +# +inherit qmake_base + +DEPENDS_prepend = "qt4-tools-native " + +export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++" +export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/qt4/mkspecs/qconfig.pri" +export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4" +export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34" +export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4" +export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4" +export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4" +export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4" +export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2" +export OE_QMAKE_LINK = "${CXX}" +export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}" +export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4" +export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}" +export OE_QMAKE_LIBS_QT = "qt" +export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm" +export OE_QMAKE_LIBS_X11SM = "-lSM -lICE" +export OE_QMAKE_LCONVERT = "${STAGING_BINDIR_NATIVE}/lconvert4" +export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4" +export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4" +export OE_QMAKE_XMLPATTERNS = "${STAGING_BINDIR_NATIVE}/xmlpatterns4" diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass new file mode 100644 index 0000000000..86bbede260 --- /dev/null +++ b/meta/classes/qmake_base.bbclass @@ -0,0 +1,119 @@ +QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake" + +OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++" +QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}" + +# We override this completely to eliminate the -e normally passed in +EXTRA_OEMAKE = "" + +export OE_QMAKE_CC="${CC}" +export OE_QMAKE_CFLAGS="${CFLAGS}" +export OE_QMAKE_CXX="${CXX}" +export OE_QMAKE_LDFLAGS="${LDFLAGS}" +export OE_QMAKE_AR="${AR}" +export OE_QMAKE_STRIP="echo" +export OE_QMAKE_RPATH="-Wl,-rpath-link," + +# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11 + +oe_qmake_mkspecs () { + mkdir -p 
mkspecs/${OE_QMAKE_PLATFORM} + for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do + if [ -L $f ]; then + lnk=`readlink $f` + if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then + ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f` + else + cp $f mkspecs/${OE_QMAKE_PLATFORM}/ + fi + else + cp $f mkspecs/${OE_QMAKE_PLATFORM}/ + fi + done +} + +do_generate_qt_config_file() { + export QT_CONF_PATH=${WORKDIR}/qt.conf + cat > ${WORKDIR}/qt.conf < +# +# Licensed under the MIT license, see COPYING.MIT for details + +ERR_REPORT_DIR ?= "${LOG_DIR}/error-report" + +def errorreport_getdata(e): + logpath = e.data.getVar('ERR_REPORT_DIR', True) + datafile = os.path.join(logpath, "error-report.txt") + with open(datafile) as f: + data = f.read() + return data + +def errorreport_savedata(e, newdata, file): + import json + logpath = e.data.getVar('ERR_REPORT_DIR', True) + bb.utils.mkdirhier(logpath) + datafile = os.path.join(logpath, file) + with open(datafile, "w") as f: + json.dump(newdata, f, indent=4, sort_keys=True) + return datafile + +python errorreport_handler () { + import json + + if isinstance(e, bb.event.BuildStarted): + data = {} + machine = e.data.getVar("MACHINE") + data['machine'] = machine + data['build_sys'] = e.data.getVar("BUILD_SYS", True) + data['nativelsb'] = e.data.getVar("NATIVELSBSTRING") + data['distro'] = e.data.getVar("DISTRO") + data['target_sys'] = e.data.getVar("TARGET_SYS", True) + data['failures'] = [] + data['component'] = e.getPkgs()[0] + data['branch_commit'] = base_detect_branch(e.data) + ": " + base_detect_revision(e.data) + errorreport_savedata(e, data, "error-report.txt") + + elif isinstance(e, bb.build.TaskFailed): + task = e.task + taskdata={} + log = e.data.getVar('BB_LOGFILE', True) + taskdata['package'] = e.data.expand("${PF}") + taskdata['task'] = task + if log: + logFile = open(log, 'r') + taskdata['log'] = logFile.read() + logFile.close() + else: + taskdata['log'] = "No Log" + jsondata = json.loads(errorreport_getdata(e)) + jsondata['failures'].append(taskdata) + errorreport_savedata(e, jsondata, "error-report.txt") + + elif isinstance(e, bb.event.BuildCompleted): + jsondata = json.loads(errorreport_getdata(e)) + failures = jsondata['failures'] + if(len(failures) > 0): + filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt" + datafile = errorreport_savedata(e, jsondata, filename) + bb.note("The errors for this build are stored in %s\nYou can send the errors to an upstream server by running:\n send-error-report %s [server]" % (datafile, datafile)) + bb.note("The contents of these logs will be posted in public if you use the above command with the default server. If you need to do so, please ensure you remove any identifying or proprietary information before sending.") +} + +addhandler errorreport_handler +errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed" diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass new file mode 100644 index 0000000000..e68d02a783 --- /dev/null +++ b/meta/classes/rm_work.bbclass @@ -0,0 +1,120 @@ +# +# Removes source after build +# +# To use it add that line to conf/local.conf: +# +# INHERIT += "rm_work" +# +# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE. 
+# For example, in conf/local.conf: +# +# RM_WORK_EXCLUDE += "icu-native icu busybox" +# + +# Use the completion scheduler by default when rm_work is active +# to try and reduce disk usage +BB_SCHEDULER ?= "completion" + +RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}" +BB_DEFAULT_TASK = "rm_work_all" + +do_rm_work () { + # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe. + for p in ${RM_WORK_EXCLUDE}; do + if [ "$p" = "${PN}" ]; then + bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE" + exit 0 + fi + done + + cd ${WORKDIR} + for dir in * + do + # Retain only logs and other files in temp, safely ignore + # failures of removing pseudo folers on NFS2/3 server. + if [ $dir = 'pseudo' ]; then + rm -rf $dir 2> /dev/null || true + elif [ $dir != 'temp' ]; then + rm -rf $dir + fi + done + + # Need to add pseudo back or subsqeuent work in this workdir + # might fail since setscene may not rerun to recreate it + mkdir -p ${WORKDIR}/pseudo/ + + # Change normal stamps into setscene stamps as they better reflect the + # fact WORKDIR is now empty + # Also leave noexec stamps since setscene stamps don't cover them + cd `dirname ${STAMP}` + for i in `basename ${STAMP}`* + do + for j in ${SSTATETASKS} + do + case $i in + *do_setscene*) + break + ;; + *sigdata*) + i=dummy + break + ;; + *do_package_write*) + i=dummy + break + ;; + *do_build*) + i=dummy + break + ;; + # We remove do_package entirely, including any + # sstate version since otherwise we'd need to leave 'plaindirs' around + # such as 'packages' and 'packages-split' and these can be large. No end + # of chain tasks depend directly on do_package anymore. + *do_package|*do_package.*|*do_package_setscene.*) + rm -f $i; + i=dummy + break + ;; + *_setscene*) + i=dummy + break + ;; + *$j|*$j.*) + mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"` + i=dummy + break + ;; + esac + done + rm -f $i + done +} +addtask rm_work after do_${RMWORK_ORIG_TASK} + +do_rm_work_all () { + : +} +do_rm_work_all[recrdeptask] = "do_rm_work" +addtask rm_work_all after do_rm_work + +do_populate_sdk[postfuncs] += "rm_work_populatesdk" +rm_work_populatesdk () { + : +} +rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk" + +do_rootfs[postfuncs] += "rm_work_rootfs" +rm_work_rootfs () { + : +} +rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs" + +python () { + # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe. + excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split() + pn = d.getVar("PN", True) + if pn in excludes: + d.delVarFlag('rm_work_rootfs', 'cleandirs') + d.delVarFlag('rm_work_populatesdk', 'cleandirs') +} diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass new file mode 100644 index 0000000000..d51b4582d2 --- /dev/null +++ b/meta/classes/rootfs_deb.bbclass @@ -0,0 +1,39 @@ +# +# Copyright 2006-2007 Openedhand Ltd. 
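A hedged sketch expanding on the qemu_run_binary() helper added in qemu.bbclass earlier in this patch, not part of the commit itself: a postinstall scriptlet in a recipe that inherits the qemu class can run a target-architecture binary at rootfs-assembly time through the user-mode emulator. The recipe and binary names below are invented for illustration.

    # hypothetical pkg_postinst fragment in a recipe inheriting qemu
    pkg_postinst_${PN} () {
        if [ x"$D" != "x" ]; then
            # offline (do_rootfs time): run the target binary via qemu
            ${@qemu_run_binary(d, '$D', '${bindir}/update-foo-cache')} --refresh
        else
            # on the running target the binary can be executed directly
            ${bindir}/update-foo-cache --refresh
        fi
    }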
+# + +ROOTFS_PKGMANAGE = "dpkg apt" +ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts" + +do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot" +do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot" +do_rootfs[recrdeptask] += "do_package_write_deb" +rootfs_deb_do_rootfs[vardepsexclude] += "BUILDNAME" +do_rootfs[vardeps] += "PACKAGE_FEED_URIS" + +do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock" +do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock" + +python rootfs_deb_bad_recommendations() { + if d.getVar("BAD_RECOMMENDATIONS", True): + bb.warn("Debian package install does not support BAD_RECOMMENDATIONS") +} +do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations" + +DEB_POSTPROCESS_COMMANDS = "" + +opkglibdir = "${localstatedir}/lib/opkg" + +python () { + # Map TARGET_ARCH to Debian's ideas about architectures + darch = d.getVar('SDK_ARCH', True) + if darch in ["x86", "i486", "i586", "i686", "pentium"]: + d.setVar('DEB_SDK_ARCH', 'i386') + elif darch == "x86_64": + d.setVar('DEB_SDK_ARCH', 'amd64') + elif darch == "arm": + d.setVar('DEB_SDK_ARCH', 'armel') +} + +# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called +DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg" diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass new file mode 100644 index 0000000000..6139cc7d59 --- /dev/null +++ b/meta/classes/rootfs_ipk.bbclass @@ -0,0 +1,39 @@ +# +# Creates a root filesystem out of IPKs +# +# This rootfs can be mounted via root-nfs or it can be put into an cramfs/jffs etc. +# See image.bbclass for a usage of this. +# + +EXTRAOPKGCONFIG ?= "" +ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}" +ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts" + +do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot" +do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot" +do_rootfs[recrdeptask] += "do_package_write_ipk" +do_rootfs[vardeps] += "PACKAGE_FEED_URIS" +rootfs_ipk_do_rootfs[vardepsexclude] += "BUILDNAME" + +do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock" +do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock" + +OPKG_PREPROCESS_COMMANDS = "" + +OPKG_POSTPROCESS_COMMANDS = "" + +OPKGLIBDIR = "${localstatedir}/lib" + +MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg" + +python () { + + if d.getVar('BUILD_IMAGES_FROM_FEEDS', True): + flags = d.getVarFlag('do_rootfs', 'recrdeptask') + flags = flags.replace("do_package_write_ipk", "") + flags = flags.replace("do_deploy", "") + flags = flags.replace("do_populate_sysroot", "") + d.setVarFlag('do_rootfs', 'recrdeptask', flags) + d.setVar('OPKG_PREPROCESS_COMMANDS', "") + d.setVar('OPKG_POSTPROCESS_COMMANDS', '') +} diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass new file mode 100644 index 0000000000..d85d001a62 --- /dev/null +++ b/meta/classes/rootfs_rpm.bbclass @@ -0,0 +1,47 @@ +# +# Creates a root filesystem out of rpm packages +# + +ROOTFS_PKGMANAGE = "rpm smartpm" +ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts" + +# Add 50Meg of extra space for Smart +IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 51200", "" ,d)}" + +# Smart is python based, so be sure python-native is available to us. 
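An aside on the BUILD_IMAGES_FROM_FEEDS handling in rootfs_ipk.bbclass above (the rpm class below applies the same trick): when the variable is set, the anonymous Python function strips do_package_write_*, do_deploy and do_populate_sysroot out of do_rootfs[recrdeptask], so the image is assembled from already-published packages instead of forcing them to be rebuilt. A purely illustrative local.conf sketch follows; the feed URI is a placeholder, and pairing it with PACKAGE_FEED_URIS (referenced in the vardeps above) is an assumption rather than something this commit spells out.

    # conf/local.conf sketch, illustrative values only
    BUILD_IMAGES_FROM_FEEDS = "1"
    PACKAGE_FEED_URIS = "http://feeds.example.com/ipk"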
+EXTRANATIVEPATH += "python-native" + +# opkg is needed for update-alternatives +RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \ + rpmresolve-native:do_populate_sysroot \ + python-smartpm-native:do_populate_sysroot \ + createrepo-native:do_populate_sysroot \ + opkg-native:do_populate_sysroot" + +do_rootfs[depends] += "${RPMROOTFSDEPENDS}" +do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}" + +do_rootfs[recrdeptask] += "do_package_write_rpm" +rootfs_rpm_do_rootfs[vardepsexclude] += "BUILDNAME" +do_rootfs[vardeps] += "PACKAGE_FEED_URIS" + +# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files +# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently +do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock" +do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock" + +python () { + if d.getVar('BUILD_IMAGES_FROM_FEEDS', True): + flags = d.getVarFlag('do_rootfs', 'recrdeptask') + flags = flags.replace("do_package_write_rpm", "") + flags = flags.replace("do_deploy", "") + flags = flags.replace("do_populate_sysroot", "") + d.setVarFlag('do_rootfs', 'recrdeptask', flags) + d.setVar('RPM_PREPROCESS_COMMANDS', '') + d.setVar('RPM_POSTPROCESS_COMMANDS', '') + +} +# Smart is python based, so be sure python-native is available to us. +EXTRANATIVEPATH += "python-native" + +rpmlibdir = "/var/lib/rpm" diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass new file mode 100644 index 0000000000..5be5efb8a4 --- /dev/null +++ b/meta/classes/sanity.bbclass @@ -0,0 +1,887 @@ +# +# Sanity check the users setup for common misconfigurations +# + +SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \ + gzip gawk chrpath wget cpio perl" + +def bblayers_conf_file(d): + return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf') + +def sanity_conf_read(fn): + with open(fn, 'r') as f: + lines = f.readlines() + return lines + +def sanity_conf_find_line(pattern, lines): + import re + return next(((index, line) + for index, line in enumerate(lines) + if re.search(pattern, line)), (None, None)) + +def sanity_conf_update(fn, lines, version_var_name, new_version): + index, line = sanity_conf_find_line(version_var_name, lines) + lines[index] = '%s = "%d"\n' % (version_var_name, new_version) + with open(fn, "w") as f: + f.write(''.join(lines)) + +# Functions added to this variable MUST throw an exception (or sys.exit()) unless they +# successfully changed LCONF_VERSION in bblayers.conf +BBLAYERS_CONF_UPDATE_FUNCS += "oecore_update_bblayers" + +python oecore_update_bblayers() { + # bblayers.conf is out of date, so see if we can resolve that + + current_lconf = int(d.getVar('LCONF_VERSION', True)) + if not current_lconf: + sys.exit() + lconf_version = int(d.getVar('LAYER_CONF_VERSION', True)) + lines = [] + + if current_lconf < 4: + sys.exit() + + bblayers_fn = bblayers_conf_file(d) + lines = sanity_conf_read(bblayers_fn) + + if current_lconf == 4 and lconf_version > 4: + topdir_var = '$' + '{TOPDIR}' + index, bbpath_line = sanity_conf_find_line('BBPATH', lines) + if bbpath_line: + start = bbpath_line.find('"') + if start != -1 and (len(bbpath_line) != (start + 1)): + if bbpath_line[start + 1] == '"': + lines[index] = (bbpath_line[:start + 1] + + topdir_var + bbpath_line[start + 1:]) + else: + if not topdir_var in bbpath_line: + lines[index] = (bbpath_line[:start + 1] + + topdir_var + ':' + bbpath_line[start + 1:]) + else: + sys.exit() + else: + index, bbfiles_line = sanity_conf_find_line('BBFILES', 
lines) + if bbfiles_line: + lines.insert(index, 'BBPATH = "' + topdir_var + '"\n') + else: + sys.exit() + + current_lconf += 1 + sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf) + return + + sys.exit() +} + +def raise_sanity_error(msg, d, network_error=False): + if d.getVar("SANITY_USE_EVENTS", True) == "1": + try: + bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d) + except TypeError: + bb.event.fire(bb.event.SanityCheckFailed(msg), d) + return + + bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration. + Either fix the cause of this error or at your own risk disable the checker (see sanity.conf). + Following is the list of potential problems / advisories: + + %s""" % msg) + +# Check flags associated with a tuning. +def check_toolchain_tune_args(data, tune, multilib, errs): + found_errors = False + if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'): + found_errors = True + if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'): + found_errors = True + if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'): + found_errors = True + + return found_errors + +def check_toolchain_args_present(data, tune, multilib, tune_errors, which): + args_set = (data.getVar("TUNE_%s" % which, True) or "").split() + args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune), True) or "").split() + args_missing = [] + + # If no args are listed/required, we are done. + if not args_wanted: + return + for arg in args_wanted: + if arg not in args_set: + args_missing.append(arg) + + found_errors = False + if args_missing: + found_errors = True + tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." % + (tune, ' '.join(args_missing), which, ' '.join(args_set))) + return found_errors + +# Check a single tune for validity. +def check_toolchain_tune(data, tune, multilib): + tune_errors = [] + if not tune: + return "No tuning found for %s multilib." % multilib + localdata = bb.data.createCopy(data) + if multilib != "default": + # Apply the overrides so we can look at the details. + overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib + localdata.setVar("OVERRIDES", overrides) + bb.data.update_data(localdata) + bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib)) + features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split() + if not features: + return "Tuning '%s' has no defined features, and cannot be used." % tune + valid_tunes = localdata.getVarFlags('TUNEVALID') or {} + conflicts = localdata.getVarFlags('TUNECONFLICTS') or {} + # [doc] is the documentation for the variable, not a real feature + if 'doc' in valid_tunes: + del valid_tunes['doc'] + if 'doc' in conflicts: + del conflicts['doc'] + for feature in features: + if feature in conflicts: + for conflict in conflicts[feature].split(): + if conflict in features: + tune_errors.append("Feature '%s' conflicts with '%s'." % + (feature, conflict)) + if feature in valid_tunes: + bb.debug(2, " %s: %s" % (feature, valid_tunes[feature])) + else: + tune_errors.append("Feature '%s' is not defined." % feature) + whitelist = localdata.getVar("TUNEABI_WHITELIST", True) + if whitelist: + tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune, True) + if not tuneabi: + tuneabi = tune + if True not in [x in whitelist.split() for x in tuneabi.split()]: + tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." 
% + (tune, tuneabi)) + else: + if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors): + bb.debug(2, "Sanity check: Compiler args OK for %s." % tune) + if tune_errors: + return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors) + +def check_toolchain(data): + tune_error_set = [] + deftune = data.getVar("DEFAULTTUNE", True) + tune_errors = check_toolchain_tune(data, deftune, 'default') + if tune_errors: + tune_error_set.append(tune_errors) + + multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split() + global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split() + + if multilibs: + seen_libs = [] + seen_tunes = [] + for lib in multilibs: + if lib in seen_libs: + tune_error_set.append("The multilib '%s' appears more than once." % lib) + else: + seen_libs.append(lib) + if not lib in global_multilibs: + tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib) + tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True) + if tune in seen_tunes: + tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune) + else: + seen_libs.append(tune) + if tune == deftune: + tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune)) + else: + tune_errors = check_toolchain_tune(data, tune, lib) + if tune_errors: + tune_error_set.append(tune_errors) + if tune_error_set: + return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n" + + return "" + +def check_conf_exists(fn, data): + bbpath = [] + fn = data.expand(fn) + vbbpath = data.getVar("BBPATH") + if vbbpath: + bbpath += vbbpath.split(":") + for p in bbpath: + currname = os.path.join(data.expand(p), fn) + if os.access(currname, os.R_OK): + return True + return False + +def check_create_long_filename(filepath, pathname): + import string, random + testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200))) + try: + if not os.path.exists(filepath): + bb.utils.mkdirhier(filepath) + f = open(testfile, "w") + f.close() + os.remove(testfile) + except IOError as e: + import errno + err, strerror = e.args + if err == errno.ENAMETOOLONG: + return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname + else: + return "Failed to create a file in %s: %s.\n" % (pathname, strerror) + except OSError as e: + errno, strerror = e.args + return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror) + return "" + +def check_path_length(filepath, pathname, limit): + if len(filepath) > limit: + return "The length of %s is longer than 410, this would cause unexpected errors, please use a shorter path.\n" % pathname + return "" + +def get_filesystem_id(path): + status, result = oe.utils.getstatusoutput("stat -f -c '%s' %s" % ("%t", path)) + if status == 0: + return result + else: + bb.warn("Can't get the filesystem id of: %s" % path) + return None + +# Check that the path isn't located on nfs. +def check_not_nfs(path, name): + # The nfs' filesystem id is 6969 + if get_filesystem_id(path) == "6969": + return "The %s: %s can't be located on nfs.\n" % (name, path) + return "" + +def check_connectivity(d): + # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable + # using the same syntax as for SRC_URI. 
If the variable is not set + # the check is skipped + test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split() + retval = "" + + # Only check connectivity if network enabled and the + # CONNECTIVITY_CHECK_URIS are set + network_enabled = not d.getVar('BB_NO_NETWORK', True) + check_enabled = len(test_uris) + # Take a copy of the data store and unset MIRRORS and PREMIRRORS + data = bb.data.createCopy(d) + data.delVar('PREMIRRORS') + data.delVar('MIRRORS') + if check_enabled and network_enabled: + try: + fetcher = bb.fetch2.Fetch(test_uris, data) + fetcher.checkstatus() + except Exception: + # Allow the message to be configured so that users can be + # pointed to a support mechanism. + msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or "" + if len(msg) == 0: + msg = "Failed to fetch test data from the network. Please ensure your network is configured correctly.\n" + retval = msg + + return retval + +def check_supported_distro(sanity_data): + from fnmatch import fnmatch + + tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True) + if not tested_distros: + return + + try: + distro = oe.lsb.distro_identifier() + except Exception: + distro = None + + if not distro: + bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.') + + for supported in [x.strip() for x in tested_distros.split('\\n')]: + if fnmatch(distro, supported): + return + + bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro) + +# Checks we should only make if MACHINE is set correctly +def check_sanity_validmachine(sanity_data): + messages = "" + + # Check TUNE_ARCH is set + if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID': + messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n' + + # Check TARGET_OS is set + if sanity_data.getVar('TARGET_OS', True) == 'INVALID': + messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n' + + # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS + pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True) + tunepkg = sanity_data.getVar('TUNE_PKGARCH', True) + tunefound = False + seen = {} + dups = [] + + for pa in pkgarchs.split(): + if seen.get(pa, 0) == 1: + dups.append(pa) + else: + seen[pa] = 1 + if pa == tunepkg: + tunefound = True + + if len(dups): + messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups) + + if tunefound == False: + messages = messages + "Error, the PACKAGE_ARCHS variable does not contain TUNE_PKGARCH (%s)." 
% tunepkg + + return messages + +# Checks if necessary to add option march to host gcc +def check_gcc_march(sanity_data): + result = True + message = "" + + # Check if -march not in BUILD_CFLAGS + if sanity_data.getVar("BUILD_CFLAGS",True).find("-march") < 0: + result = False + + # Construct a test file + f = open("gcc_test.c", "w") + f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n") + f.close() + + # Check if GCC could work without march + if not result: + status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc gcc_test.c -o gcc_test") + if status == 0: + result = True; + + if not result: + status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=native gcc_test.c -o gcc_test") + if status == 0: + message = "BUILD_CFLAGS_append = \" -march=native\"" + result = True; + + if not result: + build_arch = sanity_data.getVar('BUILD_ARCH', True) + status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=%s gcc_test.c -o gcc_test" % build_arch) + if status == 0: + message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch + result = True; + + os.remove("gcc_test.c") + if os.path.exists("gcc_test"): + os.remove("gcc_test") + + return (result, message) + +# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612. +# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate. +def check_make_version(sanity_data): + from distutils.version import LooseVersion + status, result = oe.utils.getstatusoutput("make --version") + if status != 0: + return "Unable to execute make --version, exit code %s\n" % status + version = result.split()[2] + if LooseVersion(version) == LooseVersion("3.82"): + # Construct a test file + f = open("makefile_test", "w") + f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n") + f.write("\n") + f.write("makefile_test_a.c:\n") + f.write(" touch $@\n") + f.write("\n") + f.write("makefile_test_b.c:\n") + f.write(" touch $@\n") + f.close() + + # Check if make 3.82 has been patched + status,result = oe.utils.getstatusoutput("make -f makefile_test") + + os.remove("makefile_test") + if os.path.exists("makefile_test_a.c"): + os.remove("makefile_test_a.c") + if os.path.exists("makefile_test_b.c"): + os.remove("makefile_test_b.c") + if os.path.exists("makefile_test.a"): + os.remove("makefile_test.a") + + if status != 0: + return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n" + return None + + +# Tar version 1.24 and onwards handle overwriting symlinks correctly +# but earlier versions do not; this needs to work properly for sstate +def check_tar_version(sanity_data): + from distutils.version import LooseVersion + status, result = oe.utils.getstatusoutput("tar --version") + if status != 0: + return "Unable to execute tar --version, exit code %s\n" % status + version = result.split()[3] + if LooseVersion(version) < LooseVersion("1.24"): + return "Your version of tar is older than 1.24 and has bugs which will break builds. 
Please install a newer version of tar.\n" + return None + +# We use git parameters and functionality only found in 1.7.8 or later +def check_git_version(sanity_data): + from distutils.version import LooseVersion + status, result = oe.utils.getstatusoutput("git --version 2> /dev/null") + if status != 0: + return "Unable to execute git --version, exit code %s\n" % status + version = result.split()[2] + if LooseVersion(version) < LooseVersion("1.7.8"): + return "Your version of git is older than 1.7.8 and has bugs which will break builds. Please install a newer version of git.\n" + return None + +# Check the required perl modules which may not be installed by default +def check_perl_modules(sanity_data): + ret = "" + modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" ) + for m in modules: + status, result = oe.utils.getstatusoutput("perl -e 'use %s' 2> /dev/null" % m) + if status != 0: + ret += "%s " % m + if ret: + return "Required perl module(s) not found: %s\n" % ret + return None + +def sanity_check_conffiles(status, d): + # Check we are using a valid local.conf + current_conf = d.getVar('CONF_VERSION', True) + conf_version = d.getVar('LOCALCONF_VERSION', True) + + if current_conf != conf_version: + status.addresult("Your version of local.conf was generated from an older/newer version of local.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/local.conf ${COREBASE}/meta*/conf/local.conf.sample\" is a good way to visualise the changes.\n") + + # Check bblayers.conf is valid + current_lconf = d.getVar('LCONF_VERSION', True) + lconf_version = d.getVar('LAYER_CONF_VERSION', True) + if current_lconf != lconf_version: + funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split() + for func in funcs: + success = True + try: + bb.build.exec_func(func, d) + except Exception: + success = False + if success: + bb.note("Your conf/bblayers.conf has been automatically updated.") + status.reparse = True + break + if not status.reparse: + status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare the your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version)) + + # If we have a site.conf, check it's valid + if check_conf_exists("conf/site.conf", d): + current_sconf = d.getVar('SCONF_VERSION', True) + sconf_version = d.getVar('SITE_CONF_VERSION', True) + if current_sconf != sconf_version: + status.addresult("Your version of site.conf was generated from an older version of site.conf.sample and there have been updates made to this file. 
Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/site.conf ${COREBASE}/meta*/conf/site.conf.sample\" is a good way to visualise the changes.\n") + + +def sanity_handle_abichanges(status, d): + # + # Check the 'ABI' of TMPDIR + # + current_abi = d.getVar('OELAYOUT_ABI', True) + abifile = d.getVar('SANITY_ABIFILE', True) + if os.path.exists(abifile): + with open(abifile, "r") as f: + abi = f.read().strip() + if not abi.isdigit(): + with open(abifile, "w") as f: + f.write(current_abi) + elif abi == "2" and current_abi == "3": + bb.note("Converting staging from layout version 2 to layout version 3") + subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True) + subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True) + subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True) + with open(abifile, "w") as f: + f.write(current_abi) + elif abi == "3" and current_abi == "4": + bb.note("Converting staging layout from version 3 to layout version 4") + if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")): + subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True) + subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True) + with open(abifile, "w") as f: + f.write(current_abi) + elif abi == "4": + status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n") + elif abi == "5" and current_abi == "6": + bb.note("Converting staging layout from version 5 to layout version 6") + subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True) + with open(abifile, "w") as f: + f.write(current_abi) + elif abi == "7" and current_abi == "8": + status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n") + elif (abi != current_abi and current_abi == "9"): + status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will be still be valid and reused)\n") + elif (abi != current_abi): + # Code to convert from one ABI to another could go here if possible. + status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi)) + else: + with open(abifile, "w") as f: + f.write(current_abi) + +def check_sanity_sstate_dir_change(sstate_dir, data): + # Sanity checks to be done when the value of SSTATE_DIR changes + + # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. 
eCryptFS) + testmsg = "" + if sstate_dir != "": + testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR") + # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS + try: + err = testmsg.split(': ')[1].strip() + if err == "Permission denied.": + testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir) + except IndexError: + pass + return testmsg + +def check_sanity_version_change(status, d): + # Sanity checks to be done when SANITY_VERSION changes + # In other words, these tests run once in a given build directory and then + # never again until the sanity version changes. + + # Check the python install is complete. glib-2.0-natives requries + # xml.parsers.expat + try: + import xml.parsers.expat + except ImportError: + status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n') + import stat + + status.addresult(check_make_version(d)) + status.addresult(check_tar_version(d)) + status.addresult(check_git_version(d)) + status.addresult(check_perl_modules(d)) + + missing = "" + + if not check_app_exists("${MAKE}", d): + missing = missing + "GNU make," + + if not check_app_exists('${BUILD_PREFIX}gcc', d): + missing = missing + "C Compiler (%sgcc)," % d.getVar("BUILD_PREFIX", True) + + if not check_app_exists('${BUILD_PREFIX}g++', d): + missing = missing + "C++ Compiler (%sg++)," % d.getVar("BUILD_PREFIX", True) + + required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True) + + for util in required_utilities.split(): + if not check_app_exists(util, d): + missing = missing + "%s," % util + + if missing: + missing = missing.rstrip(',') + status.addresult("Please install the following missing utilities: %s\n" % missing) + + assume_provided = d.getVar('ASSUME_PROVIDED', True).split() + # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf + if "diffstat-native" not in assume_provided: + status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n') + + if "qemu-native" in assume_provided: + if not check_app_exists("qemu-arm", d): + status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH") + + if "libsdl-native" in assume_provided: + if not check_app_exists("sdl-config", d): + status.addresult("libsdl-native is set to be ASSUME_PROVIDED but sdl-config can't be found in PATH. Please either install it, or configure qemu not to require sdl.") + + (result, message) = check_gcc_march(d) + if result and message: + status.addresult("Your gcc version is older than 4.5, please add the following param to local.conf\n \ + %s\n" % message) + if not result: + status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build") + status.addresult(" and link something that uses atomic operations, such as: \n") + status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n") + + # Check that TMPDIR isn't on a filesystem with limited filename length (eg. 
eCryptFS) + tmpdir = d.getVar('TMPDIR', True) + status.addresult(check_create_long_filename(tmpdir, "TMPDIR")) + tmpdirmode = os.stat(tmpdir).st_mode + if (tmpdirmode & stat.S_ISGID): + status.addresult("TMPDIR is setgid, please don't build in a setgid directory") + if (tmpdirmode & stat.S_ISUID): + status.addresult("TMPDIR is setuid, please don't build in a setuid directory") + + # Some third-party software apparently relies on chmod etc. being suid root (!!) + import stat + suid_check_bins = "chown chmod mknod".split() + for bin_cmd in suid_check_bins: + bin_path = bb.utils.which(os.environ["PATH"], bin_cmd) + if bin_path: + bin_stat = os.stat(bin_path) + if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID: + status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path) + + # Check that we can fetch from various network transports + netcheck = check_connectivity(d) + status.addresult(netcheck) + if netcheck: + status.network_error = True + + nolibs = d.getVar('NO32LIBS', True) + if not nolibs: + lib32path = '/lib' + if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ): + lib32path = '/lib32' + + if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'): + status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n") + + bbpaths = d.getVar('BBPATH', True).split(":") + if ("." in bbpaths or "" in bbpaths) and not status.reparse: + status.addresult("BBPATH references the current directory, either through " \ + "an empty entry, or a '.'.\n\t This is unsafe and means your "\ + "layer configuration is adding empty elements to BBPATH.\n\t "\ + "Please check your layer.conf files and other BBPATH " \ + "settings to remove the current working directory " \ + "references.\n" \ + "Parsed BBPATH is" + str(bbpaths)); + + oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF', True) + if not oes_bb_conf: + status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n') + + # The length of TMPDIR can't be longer than 410 + status.addresult(check_path_length(tmpdir, "TMPDIR", 410)) + + # Check that TMPDIR isn't located on nfs + status.addresult(check_not_nfs(tmpdir, "TMPDIR")) + +def check_sanity_everybuild(status, d): + # Sanity tests which test the users environment so need to run at each build (or are so cheap + # it makes sense to always run them. + + if 0 == os.getuid(): + raise_sanity_error("Do not use Bitbake as root.", d) + + # Check the Python version, we now have a minimum of Python 2.7.3 + import sys + if sys.hexversion < 0x020703F0: + status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n') + + # Check the bitbake version meets minimum requirements + from distutils.version import LooseVersion + minversion = d.getVar('BB_MIN_VERSION', True) + if (LooseVersion(bb.__version__) < LooseVersion(minversion)): + status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__)) + + sanity_check_conffiles(status, d) + + paths = d.getVar('PATH', True).split(":") + if "." in paths or "" in paths: + status.addresult("PATH contains '.' 
or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n") + + # Check that the DISTRO is valid, if set + # need to take into account DISTRO renaming DISTRO + distro = d.getVar('DISTRO', True) + if distro and distro != "nodistro": + if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ): + status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True)) + + # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't + # set, since so much relies on it being set. + dldir = d.getVar('DL_DIR', True) + if not dldir: + status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n") + if os.path.exists(dldir) and not os.access(dldir, os.W_OK): + status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir) + + # Check that the MACHINE is valid, if it is set + machinevalid = True + if d.getVar('MACHINE', True): + if not check_conf_exists("conf/machine/${MACHINE}.conf", d): + status.addresult('Please set a valid MACHINE in your local.conf or environment\n') + machinevalid = False + else: + status.addresult(check_sanity_validmachine(d)) + else: + status.addresult('Please set a MACHINE in your local.conf or environment\n') + machinevalid = False + if machinevalid: + status.addresult(check_toolchain(d)) + + # Check that the SDKMACHINE is valid, if it is set + if d.getVar('SDKMACHINE', True): + if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d): + status.addresult('Specified SDKMACHINE value is not valid\n') + elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}": + status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n') + + check_supported_distro(d) + + # Check if DISPLAY is set if TEST_IMAGE is set + if d.getVar('TEST_IMAGE', True) == '1' or d.getVar('DEFAULT_TEST_SUITES', True): + testtarget = d.getVar('TEST_TARGET', True) + if testtarget == 'qemu' or testtarget == 'QemuTarget': + display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True) + if not display: + status.addresult('testimage needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n') + + omask = os.umask(022) + if omask & 0755: + status.addresult("Please use a umask which allows a+rx and u+rwx\n") + os.umask(omask) + + if d.getVar('TARGET_ARCH', True) == "arm": + # This path is no longer user-readable in modern (very recent) Linux + try: + if os.path.exists("/proc/sys/vm/mmap_min_addr"): + f = open("/proc/sys/vm/mmap_min_addr", "r") + try: + if (int(f.read().strip()) > 65536): + status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n") + finally: + f.close() + except: + pass + + oeroot = d.getVar('COREBASE', True) + if oeroot.find('+') != -1: + status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.") + if oeroot.find('@') != -1: + status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. 
Please move the installation to a directory which doesn't include any @ characters.") + if oeroot.find(' ') != -1: + status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.") + + # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS + import re + mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS'] + protocols = ['http', 'ftp', 'file', 'https', \ + 'git', 'gitsm', 'hg', 'osc', 'p4', 'svk', 'svn', \ + 'bzr', 'cvs'] + for mirror_var in mirror_vars: + mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n') + for mirror_entry in mirrors: + mirror_entry = mirror_entry.strip() + if not mirror_entry: + # ignore blank lines + continue + + try: + pattern, mirror = mirror_entry.split() + except ValueError: + bb.warn('Invalid %s: %s, should be 2 members.' % (mirror_var, mirror_entry.strip())) + continue + + decoded = bb.fetch2.decodeurl(pattern) + try: + pattern_scheme = re.compile(decoded[0]) + except re.error as exc: + bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry)) + continue + + if not any(pattern_scheme.match(protocol) for protocol in protocols): + bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry)) + continue + + if not any(mirror.startswith(protocol + '://') for protocol in protocols): + bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry)) + continue + + if mirror.startswith('file://') and not mirror.startswith('file:///'): + bb.warn('Invalid file url in %s: %s, must be absolute path (file:///)' % (mirror_var, mirror_entry)) + + # Check that TMPDIR hasn't changed location since the last time we were run + tmpdir = d.getVar('TMPDIR', True) + checkfile = os.path.join(tmpdir, "saved_tmpdir") + if os.path.exists(checkfile): + with open(checkfile, "r") as f: + saved_tmpdir = f.read().strip() + if (saved_tmpdir != tmpdir): + status.addresult("Error, TMPDIR has changed location. 
You need to either move it back to %s or rebuild\n" % saved_tmpdir) + else: + bb.utils.mkdirhier(tmpdir) + with open(checkfile, "w") as f: + f.write(tmpdir) + +def check_sanity(sanity_data): + import subprocess + + class SanityStatus(object): + def __init__(self): + self.messages = "" + self.network_error = False + self.reparse = False + + def addresult(self, message): + if message: + self.messages = self.messages + message + + status = SanityStatus() + + tmpdir = sanity_data.getVar('TMPDIR', True) + sstate_dir = sanity_data.getVar('SSTATE_DIR', True) + + # Check saved sanity info + last_sanity_version = 0 + last_tmpdir = "" + last_sstate_dir = "" + sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info") + if os.path.exists(sanityverfile): + with open(sanityverfile, 'r') as f: + for line in f: + if line.startswith('SANITY_VERSION'): + last_sanity_version = int(line.split()[1]) + if line.startswith('TMPDIR'): + last_tmpdir = line.split()[1] + if line.startswith('SSTATE_DIR'): + last_sstate_dir = line.split()[1] + + check_sanity_everybuild(status, sanity_data) + + sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1) + network_error = False + if last_sanity_version < sanity_version: + check_sanity_version_change(status, sanity_data) + status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data)) + else: + if last_sstate_dir != sstate_dir: + status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data)) + + if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages: + with open(sanityverfile, 'w') as f: + f.write("SANITY_VERSION %s\n" % sanity_version) + f.write("TMPDIR %s\n" % tmpdir) + f.write("SSTATE_DIR %s\n" % sstate_dir) + + sanity_handle_abichanges(status, sanity_data) + + if status.messages != "": + raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error) + return status.reparse + +# Create a copy of the datastore and finalise it to ensure appends and +# overrides are set - the datastore has yet to be finalised at ConfigParsed +def copy_data(e): + sanity_data = bb.data.createCopy(e.data) + sanity_data.finalize() + return sanity_data + +addhandler check_sanity_eventhandler +check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest" +python check_sanity_eventhandler() { + if bb.event.getName(e) == "SanityCheck": + sanity_data = copy_data(e) + if e.generateevents: + sanity_data.setVar("SANITY_USE_EVENTS", "1") + reparse = check_sanity(sanity_data) + e.data.setVar("BB_INVALIDCONF", reparse) + bb.event.fire(bb.event.SanityCheckPassed(), e.data) + elif bb.event.getName(e) == "NetworkTest": + sanity_data = copy_data(e) + if e.generateevents: + sanity_data.setVar("SANITY_USE_EVENTS", "1") + bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data) + + return +} diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass new file mode 100644 index 0000000000..fc0f26b17b --- /dev/null +++ b/meta/classes/scons.bbclass @@ -0,0 +1,15 @@ +DEPENDS += "python-scons-native" + +EXTRA_OESCONS ?= "" + +scons_do_compile() { + ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \ + bbfatal "scons build execution failed." +} + +scons_do_install() { + ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS}|| \ + bbfatal "scons install execution failed." 
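An illustrative aside on the scons class shown immediately above (its closing brace and EXPORT_FUNCTIONS line follow): a consuming recipe only needs to inherit the class and, where the build requires it, pass extra SCons arguments through EXTRA_OESCONS; EXPORT_FUNCTIONS then maps scons_do_compile/scons_do_install onto the recipe's do_compile/do_install. The build variable below is hypothetical.

    # hypothetical recipe fragment
    inherit scons
    EXTRA_OESCONS = "VERBOSE=1"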
+} + +EXPORT_FUNCTIONS do_compile do_install diff --git a/meta/classes/sdl.bbclass b/meta/classes/sdl.bbclass new file mode 100644 index 0000000000..cc31288f61 --- /dev/null +++ b/meta/classes/sdl.bbclass @@ -0,0 +1,6 @@ +# +# (C) Michael 'Mickey' Lauer +# + +DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image" +SECTION = "x11/games" diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass new file mode 100644 index 0000000000..56343b1c73 --- /dev/null +++ b/meta/classes/setuptools.bbclass @@ -0,0 +1,8 @@ +inherit distutils + +DEPENDS += "python-distribute-native" + +DISTUTILS_INSTALL_ARGS = "--root=${D} \ + --prefix=${prefix} \ + --install-lib=${PYTHON_SITEPACKAGES_DIR} \ + --install-data=${datadir}" diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass new file mode 100644 index 0000000000..40c18c8976 --- /dev/null +++ b/meta/classes/setuptools3.bbclass @@ -0,0 +1,8 @@ +inherit distutils3 + +DEPENDS += "python3-distribute-native" + +DISTUTILS_INSTALL_ARGS = "--root=${D} \ + --prefix=${prefix} \ + --install-lib=${PYTHON_SITEPACKAGES_DIR} \ + --install-data=${datadir}" diff --git a/meta/classes/sip.bbclass b/meta/classes/sip.bbclass new file mode 100644 index 0000000000..6ed2a13bda --- /dev/null +++ b/meta/classes/sip.bbclass @@ -0,0 +1,61 @@ +# Build Class for Sip based Python Bindings +# (C) Michael 'Mickey' Lauer +# +STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip" + +DEPENDS =+ "sip-native" +RDEPENDS_${PN} += "python-sip" + +# default stuff, do not uncomment +# EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0" + +# do_generate is before do_configure so ensure that sip_native is populated in sysroot before executing it +do_generate[depends] += "sip-native:do_populate_sysroot" + +sip_do_generate() { + if [ -z "${SIP_MODULES}" ]; then + MODULES="`ls sip/*mod.sip`" + else + MODULES="${SIP_MODULES}" + fi + + if [ -z "$MODULES" ]; then + die "SIP_MODULES not set and no modules found in $PWD" + else + bbnote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'" + fi + + if [ -z "${EXTRA_SIPTAGS}" ]; then + die "EXTRA_SIPTAGS needs to be set!" + else + SIPTAGS="${EXTRA_SIPTAGS}" + fi + + if [ ! 
-z "${SIP_FEATURES}" ]; then + FEATURES="-z ${SIP_FEATURES}" + bbnote "sip feature file: ${SIP_FEATURES}" + fi + + for module in $MODULES + do + install -d ${module}/ + echo "calling 'sip4 -I sip -I ${STAGING_SIPDIR} ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.pro.in sip/${module}/${module}mod.sip'" + sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf \ + sip/${module}/${module}mod.sip || die "Error calling sip on ${module}" + sed -e 's,target,TARGET,' -e 's,sources,SOURCES,' -e 's,headers,HEADERS,' \ + ${module}/${module}.sbf | sed s,"moc_HEADERS =","HEADERS +=", \ + >${module}/${module}.pro + echo "TEMPLATE=lib" >>${module}/${module}.pro + [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro + [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro + [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro + [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro + [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro + [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro + true + done +} + +EXPORT_FUNCTIONS do_generate + +addtask generate after do_unpack do_patch before do_configure diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass new file mode 100644 index 0000000000..45dce489de --- /dev/null +++ b/meta/classes/siteconfig.bbclass @@ -0,0 +1,33 @@ +python siteconfig_do_siteconfig () { + shared_state = sstate_state_fromvars(d) + if shared_state['task'] != 'populate_sysroot': + return + if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')): + bb.debug(1, "No site_config directory, skipping do_siteconfig") + return + bb.build.exec_func('do_siteconfig_gencache', d) + sstate_clean(shared_state, d) + sstate_install(shared_state, d) +} + +EXTRASITECONFIG ?= "" + +siteconfig_do_siteconfig_gencache () { + mkdir -p ${WORKDIR}/site_config_${MACHINE} + gen-site-config ${FILE_DIRNAME}/site_config \ + >${WORKDIR}/site_config_${MACHINE}/configure.ac + cd ${WORKDIR}/site_config_${MACHINE} + autoconf + rm -f ${BPN}_cache + CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${BPN}_cache + sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \ + -e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \ + < ${BPN}_cache > ${BPN}_config + mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d + cp ${BPN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d + +} + +do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig " + +EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass new file mode 100644 index 0000000000..e90632aeef --- /dev/null +++ b/meta/classes/siteinfo.bbclass @@ -0,0 +1,164 @@ +# This class exists to provide information about the targets that +# may be needed by other classes and/or recipes. If you add a new +# target this will probably need to be updated. + +# +# Returns information about 'what' for the named target 'target' +# where 'target' == "-" +# +# 'what' can be one of +# * target: Returns the target name ("-") +# * endianess: Return "be" for big endian targets, "le" for little endian +# * bits: Returns the bit size of the target, either "32" or "64" +# * libc: Returns the name of the c library used by the target +# +# It is an error for the target not to exist. 
+# If 'what' doesn't exist then an empty value is returned +# +def siteinfo_data(d): + archinfo = { + "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch + "aarch64": "endian-little bit-64 arm-common", + "aarch64_be": "endian-big bit-64 arm-common", + "arm": "endian-little bit-32 arm-common", + "armeb": "endian-big bit-32 arm-common", + "avr32": "endian-big bit-32 avr32-common", + "bfin": "endian-little bit-32 bfin-common", + "i386": "endian-little bit-32 ix86-common", + "i486": "endian-little bit-32 ix86-common", + "i586": "endian-little bit-32 ix86-common", + "i686": "endian-little bit-32 ix86-common", + "ia64": "endian-little bit-64", + "microblaze": "endian-big bit-32 microblaze-common", + "microblazeel": "endian-little bit-32 microblaze-common", + "mips": "endian-big bit-32 mips-common", + "mips64": "endian-big bit-64 mips-common", + "mips64el": "endian-little bit-64 mips-common", + "mipsel": "endian-little bit-32 mips-common", + "powerpc": "endian-big bit-32 powerpc-common", + "nios2": "endian-little bit-32 nios2-common", + "powerpc64": "endian-big bit-64 powerpc-common", + "ppc": "endian-big bit-32 powerpc-common", + "ppc64": "endian-big bit-64 powerpc-common", + "sh3": "endian-little bit-32 sh-common", + "sh4": "endian-little bit-32 sh-common", + "sparc": "endian-big bit-32", + "viac3": "endian-little bit-32 ix86-common", + "x86_64": "endian-little", # bitinfo specified in targetinfo + } + osinfo = { + "darwin": "common-darwin", + "darwin9": "common-darwin", + "linux": "common-linux common-glibc", + "linux-gnu": "common-linux common-glibc", + "linux-gnux32": "common-linux common-glibc", + "linux-gnun32": "common-linux common-glibc", + "linux-gnueabi": "common-linux common-glibc", + "linux-gnuspe": "common-linux common-glibc", + "linux-uclibc": "common-linux common-uclibc", + "linux-uclibceabi": "common-linux common-uclibc", + "linux-uclibcspe": "common-linux common-uclibc", + "linux-musl": "common-linux common-musl", + "linux-musleabi": "common-linux common-musl", + "linux-muslspe": "common-linux common-musl", + "uclinux-uclibc": "common-uclibc", + "cygwin": "common-cygwin", + "mingw32": "common-mingw", + } + targetinfo = { + "aarch64-linux-gnu": "aarch64-linux", + "aarch64_be-linux-gnu": "aarch64_be-linux", + "arm-linux-gnueabi": "arm-linux", + "arm-linux-musleabi": "arm-linux", + "arm-linux-uclibceabi": "arm-linux-uclibc", + "armeb-linux-gnueabi": "armeb-linux", + "armeb-linux-uclibceabi": "armeb-linux-uclibc", + "armeb-linux-musleabi": "armeb-linux", + "mips-linux-musl": "mips-linux", + "mipsel-linux-musl": "mipsel-linux", + "mips64-linux-musl": "mips-linux", + "mips64el-linux-musl": "mipsel-linux", + "mips64-linux-gnun32": "mips-linux bit-32", + "mips64el-linux-gnun32": "mipsel-linux bit-32", + "powerpc-linux": "powerpc32-linux", + "powerpc-linux-musl": "powerpc-linux powerpc32-linux", + "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux", + "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux", + "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux", + "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc", + "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux", + "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux", + "powerpc64-linux": "powerpc-linux", + "x86_64-cygwin": "bit-64", + "x86_64-darwin": "bit-64", + "x86_64-darwin9": "bit-64", + "x86_64-linux": "bit-64", + "x86_64-linux-musl": "x86_64-linux bit-64", + "x86_64-linux-uclibc": "bit-64", + "x86_64-linux-gnu": "bit-64 
x86_64-linux", + "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux", + "x86_64-mingw32": "bit-64", + } + + hostarch = d.getVar("HOST_ARCH", True) + hostos = d.getVar("HOST_OS", True) + target = "%s-%s" % (hostarch, hostos) + + sitedata = [] + if hostarch in archinfo: + sitedata.extend(archinfo[hostarch].split()) + if hostos in osinfo: + sitedata.extend(osinfo[hostos].split()) + if target in targetinfo: + sitedata.extend(targetinfo[target].split()) + sitedata.append(target) + sitedata.append("common") + + bb.debug(1, "SITE files %s" % sitedata); + return sitedata + +python () { + sitedata = set(siteinfo_data(d)) + if "endian-little" in sitedata: + d.setVar("SITEINFO_ENDIANNESS", "le") + elif "endian-big" in sitedata: + d.setVar("SITEINFO_ENDIANNESS", "be") + else: + bb.error("Unable to determine endianness for architecture '%s'" % + d.getVar("HOST_ARCH", True)) + bb.fatal("Please add your architecture to siteinfo.bbclass") + + if "bit-32" in sitedata: + d.setVar("SITEINFO_BITS", "32") + elif "bit-64" in sitedata: + d.setVar("SITEINFO_BITS", "64") + else: + bb.error("Unable to determine bit size for architecture '%s'" % + d.getVar("HOST_ARCH", True)) + bb.fatal("Please add your architecture to siteinfo.bbclass") +} + +def siteinfo_get_files(d, no_cache = False): + sitedata = siteinfo_data(d) + sitefiles = "" + for path in d.getVar("BBPATH", True).split(":"): + for element in sitedata: + filename = os.path.join(path, "site", element) + if os.path.exists(filename): + sitefiles += filename + " " + + if no_cache: return sitefiles + + # Now check for siteconfig cache files + path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True) + if os.path.isdir(path_siteconfig): + for i in os.listdir(path_siteconfig): + filename = os.path.join(path_siteconfig, i) + sitefiles += filename + " " + + return sitefiles + +# +# Make some information available via variables +# +SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d" diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass new file mode 100644 index 0000000000..bccc230d8c --- /dev/null +++ b/meta/classes/spdx.bbclass @@ -0,0 +1,325 @@ +# This class integrates real-time license scanning, generation of SPDX standard +# output and verifiying license info during the building process. +# It is a combination of efforts from the OE-Core, SPDX and Fossology projects. +# +# For more information on FOSSology: +# http://www.fossology.org +# +# For more information on FOSSologySPDX commandline: +# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API +# +# For more information on SPDX: +# http://www.spdx.org +# + +# SPDX file will be output to the path which is defined as[SPDX_MANIFEST_DIR] +# in ./meta/conf/licenses.conf. + +SPDXOUTPUTDIR = "${WORKDIR}/spdx_output_dir" +SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir" + +# If ${S} isn't actually the top-level source directory, set SPDX_S to point at +# the real top-level directory. 
+SPDX_S ?= "${S}" + +python do_spdx () { + import os, sys + import json + + info = {} + info['workdir'] = (d.getVar('WORKDIR', True) or "") + info['sourcedir'] = (d.getVar('SPDX_S', True) or "") + info['pn'] = (d.getVar( 'PN', True ) or "") + info['pv'] = (d.getVar( 'PV', True ) or "") + info['src_uri'] = (d.getVar( 'SRC_URI', True ) or "") + info['spdx_version'] = (d.getVar('SPDX_VERSION', True) or '') + info['data_license'] = (d.getVar('DATA_LICENSE', True) or '') + + spdx_sstate_dir = (d.getVar('SPDXSSTATEDIR', True) or "") + manifest_dir = (d.getVar('SPDX_MANIFEST_DIR', True) or "") + info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" ) + sstatefile = os.path.join(spdx_sstate_dir, + info['pn'] + info['pv'] + ".spdx" ) + info['spdx_temp_dir'] = (d.getVar('SPDX_TEMP_DIR', True) or "") + info['tar_file'] = os.path.join( info['workdir'], info['pn'] + ".tar.gz" ) + + + ## get everything from cache. use it to decide if + ## something needs to be rerun + cur_ver_code = get_ver_code( info['sourcedir'] ) + cache_cur = False + if not os.path.exists( spdx_sstate_dir ): + bb.utils.mkdirhier( spdx_sstate_dir ) + if not os.path.exists( info['spdx_temp_dir'] ): + bb.utils.mkdirhier( info['spdx_temp_dir'] ) + if os.path.exists( sstatefile ): + ## cache for this package exists. read it in + cached_spdx = get_cached_spdx( sstatefile ) + + if cached_spdx['PackageVerificationCode'] == cur_ver_code: + bb.warn(info['pn'] + "'s ver code same as cache's. do nothing") + cache_cur = True + else: + local_file_info = setup_foss_scan( info, + True, cached_spdx['Files'] ) + else: + local_file_info = setup_foss_scan( info, False, None ) + + if cache_cur: + spdx_file_info = cached_spdx['Files'] + else: + ## setup fossology command + foss_server = (d.getVar('FOSS_SERVER', True) or "") + foss_flags = (d.getVar('FOSS_WGET_FLAGS', True) or "") + foss_command = "wget %s --post-file=%s %s"\ + % (foss_flags,info['tar_file'],foss_server) + + #bb.warn(info['pn'] + json.dumps(local_file_info)) + foss_file_info = run_fossology( foss_command ) + spdx_file_info = create_spdx_doc( local_file_info, foss_file_info ) + ## write to cache + write_cached_spdx(sstatefile,cur_ver_code,spdx_file_info) + + ## Get document and package level information + spdx_header_info = get_header_info(info, cur_ver_code, spdx_file_info) + + ## CREATE MANIFEST + create_manifest(info,spdx_header_info,spdx_file_info) + + ## clean up the temp stuff + remove_dir_tree( info['spdx_temp_dir'] ) + if os.path.exists(info['tar_file']): + remove_file( info['tar_file'] ) +} +addtask spdx after do_patch before do_configure + +def create_manifest(info,header,files): + with open(info['outfile'], 'w') as f: + f.write(header + '\n') + for chksum, block in files.iteritems(): + for key, value in block.iteritems(): + f.write(key + ": " + value) + f.write('\n') + f.write('\n') + +def get_cached_spdx( sstatefile ): + import json + cached_spdx_info = {} + with open( sstatefile, 'r' ) as f: + try: + cached_spdx_info = json.load(f) + except ValueError as e: + cached_spdx_info = None + return cached_spdx_info + +def write_cached_spdx( sstatefile, ver_code, files ): + import json + spdx_doc = {} + spdx_doc['PackageVerificationCode'] = ver_code + spdx_doc['Files'] = {} + spdx_doc['Files'] = files + with open( sstatefile, 'w' ) as f: + f.write(json.dumps(spdx_doc)) + +def setup_foss_scan( info, cache, cached_files ): + import errno, shutil + import tarfile + file_info = {} + cache_dict = {} + + for f_dir, f in list_files( info['sourcedir'] ): + full_path = 
os.path.join( f_dir, f ) + abs_path = os.path.join(info['sourcedir'], full_path) + dest_dir = os.path.join( info['spdx_temp_dir'], f_dir ) + dest_path = os.path.join( info['spdx_temp_dir'], full_path ) + try: + stats = os.stat(abs_path) + except OSError as e: + bb.warn( "Stat failed" + str(e) + "\n") + continue + + checksum = hash_file( abs_path ) + import time + mtime = time.asctime(time.localtime(stats.st_mtime)) + + ## retain cache information if it exists + file_info[checksum] = {} + if cache and checksum in cached_files: + file_info[checksum] = cached_files[checksum] + else: + file_info[checksum]['FileName'] = full_path + + try: + os.makedirs( dest_dir ) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(dest_dir): + pass + else: + bb.warn( "mkdir failed " + str(e) + "\n" ) + continue + + if (cache and checksum not in cached_files) or not cache: + try: + shutil.copyfile( abs_path, dest_path ) + except shutil.Error as e: + bb.warn( str(e) + "\n" ) + except IOError as e: + bb.warn( str(e) + "\n" ) + + with tarfile.open( info['tar_file'], "w:gz" ) as tar: + tar.add( info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']) ) + tar.close() + + return file_info + + +def remove_dir_tree( dir_name ): + import shutil + try: + shutil.rmtree( dir_name ) + except: + pass + +def remove_file( file_name ): + try: + os.remove( file_name ) + except OSError as e: + pass + +def list_files( dir ): + for root, subFolders, files in os.walk( dir ): + for f in files: + rel_root = os.path.relpath( root, dir ) + yield rel_root, f + return + +def hash_file( file_name ): + try: + f = open( file_name, 'rb' ) + data_string = f.read() + f.close() + except IOError: + return None + sha1 = hash_string( data_string ) + return sha1 + +def hash_string( data ): + import hashlib + sha1 = hashlib.sha1() + sha1.update( data ) + return sha1.hexdigest() + +def run_fossology( foss_command ): + import string, re + import subprocess + + p = subprocess.Popen(foss_command.split(), + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + foss_output, foss_error = p.communicate() + + records = [] + records = re.findall('FileName:.*?</text>', foss_output, re.S) + + file_info = {} + for rec in records: + rec = string.replace( rec, '\r', '' ) + chksum = re.findall( 'FileChecksum: SHA1: (.*)\n', rec)[0] + file_info[chksum] = {} + file_info[chksum]['FileCopyrightText'] = re.findall( 'FileCopyrightText: ' + + '<text>(.*?)</text>', rec, re.S )[0] + fields = ['FileType','LicenseConcluded', + 'LicenseInfoInFile','FileName'] + for field in fields: + file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0] + + return file_info + +def create_spdx_doc( file_info, scanned_files ): + import json + ## push foss changes back into cache + for chksum, lic_info in scanned_files.iteritems(): + if chksum in file_info: + file_info[chksum]['FileName'] = file_info[chksum]['FileName'] + file_info[chksum]['FileType'] = lic_info['FileType'] + file_info[chksum]['FileChecksum: SHA1'] = chksum + file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile'] + file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded'] + file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText'] + else: + bb.warn(lic_info['FileName'] + " : " + chksum + + " : is not in the local file info: " + + json.dumps(lic_info,indent=1)) + return file_info + +def get_ver_code( dirname ): + chksums = [] + for f_dir, f in list_files( dirname ): + try: + stats = os.stat(os.path.join(dirname,f_dir,f)) + except OSError as e: + bb.warn( "Stat failed" + str(e) + "\n") +
continue + chksums.append(hash_file(os.path.join(dirname,f_dir,f))) + ver_code_string = ''.join( chksums ).lower() + ver_code = hash_string( ver_code_string ) + return ver_code + +def get_header_info( info, spdx_verification_code, spdx_files ): + """ + Put together the header SPDX information. + Eventually this needs to become a lot less + of a hardcoded thing. + """ + from datetime import datetime + import os + head = [] + DEFAULT = "NOASSERTION" + + #spdx_verification_code = get_ver_code( info['sourcedir'] ) + package_checksum = '' + if os.path.exists(info['tar_file']): + package_checksum = hash_file( info['tar_file'] ) + else: + package_checksum = DEFAULT + + ## document level information + head.append("SPDXVersion: " + info['spdx_version']) + head.append("DataLicense: " + info['data_license']) + head.append("DocumentComment: <text>SPDX for " + + info['pn'] + " version " + info['pv'] + "</text>") + head.append("") + + ## Creator information + now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S') + head.append("## Creation Information") + head.append("Creator: fossology-spdx") + head.append("Created: " + now) + head.append("CreatorComment: <text>UNO</text>") + head.append("") + + ## package level information + head.append("## Package Information") + head.append("PackageName: " + info['pn']) + head.append("PackageVersion: " + info['pv']) + head.append("PackageDownloadLocation: " + DEFAULT) + head.append("PackageSummary: ") + head.append("PackageFileName: " + os.path.basename(info['tar_file'])) + head.append("PackageSupplier: Person:" + DEFAULT) + head.append("PackageOriginator: Person:" + DEFAULT) + head.append("PackageChecksum: SHA1: " + package_checksum) + head.append("PackageVerificationCode: " + spdx_verification_code) + head.append("PackageDescription: <text>" + info['pn'] + + " version " + info['pv'] + "</text>") + head.append("") + head.append("PackageCopyrightText: <text>" + DEFAULT + "</text>") + head.append("") + head.append("PackageLicenseDeclared: " + DEFAULT) + head.append("PackageLicenseConcluded: " + DEFAULT) + head.append("PackageLicenseInfoFromFiles: " + DEFAULT) + head.append("") + + ## header for file level + head.append("## File Information") + head.append("") + + return '\n'.join(head) diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass new file mode 100644 index 0000000000..ace6bdb57a --- /dev/null +++ b/meta/classes/sstate.bbclass @@ -0,0 +1,837 @@ +SSTATE_VERSION = "3" + +SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control" +SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}" + +def generate_sstatefn(spec, hash, d): + if not hash: + hash = "INVALID" + return hash[:2] + "/" + spec + hash + +SSTATE_PKGARCH = "${PACKAGE_ARCH}" +SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:" +SSTATE_SWSPEC = "sstate:${BPN}::${PV}:${PR}::${SSTATE_VERSION}:" +SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}" +SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}" +SSTATE_EXTRAPATH = "" +SSTATE_EXTRAPATHWILDCARD = "" +SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}" + +# We don't want the sstate to depend on things like the distro string +# of the system; we let the sstate paths take care of this.
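+# As an illustration (the host distro string below is an assumption), a native
+# object ends up under a path such as
+#   ${SSTATE_DIR}/Ubuntu-14.04/ab/sstate:quilt-native:..._populate_sysroot.tgz
+# i.e. NATIVELSBSTRING followed by the first two characters of the task hash,
+# so switching host distros changes the path but not the task signature.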
+SSTATE_EXTRAPATH[vardepvalue] = "" + +# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical) +SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/" +# Avoid docbook/sgml catalog warnings for now +SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml" + +SSTATE_SCAN_FILES ?= "*.la *-config *_config" +SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f' + +BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}" + +SSTATE_MANMACH ?= "${SSTATE_PKGARCH}" + +SSTATECREATEFUNCS = "sstate_hardcode_path" +SSTATEPOSTCREATEFUNCS = "" +SSTATEPREINSTFUNCS = "" +SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack" +SSTATEPOSTINSTFUNCS = "" +EXTRA_STAGING_FIXMES ?= "" + +SIGGEN_LOCKEDSIGS_CHECK_LEVEL ?= 'error' + +# Specify dirs in which the shell function is executed and don't use ${B} +# as default dirs to avoid possible race about ${B} with other task. +sstate_create_package[dirs] = "${SSTATE_BUILDDIR}" +sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}" + +# Do not run sstate_hardcode_path() in ${B}: +# the ${B} maybe removed by cmake_do_configure() while +# sstate_hardcode_path() running. +sstate_hardcode_path[dirs] = "${SSTATE_BUILDDIR}" + +python () { + if bb.data.inherits_class('native', d): + d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH')) + elif bb.data.inherits_class('crosssdk', d): + d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}")) + elif bb.data.inherits_class('cross', d): + d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}")) + elif bb.data.inherits_class('nativesdk', d): + d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}")) + elif bb.data.inherits_class('cross-canadian', d): + d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}")) + elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all": + d.setVar('SSTATE_PKGARCH', "allarch") + else: + d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}")) + + if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d): + d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/") + d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/") + + # These classes encode staging paths into their scripts data so can only be + # reused if we manipulate the paths + if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d): + scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}" + d.setVar('SSTATE_SCAN_CMD', scan_cmd) + + unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split()) + d.setVar('SSTATETASKS', " ".join(unique_tasks)) + for task in unique_tasks: + d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ") + d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc") +} + +def sstate_init(task, d): + ss = {} + ss['task'] = task + ss['dirs'] = [] + ss['plaindirs'] = [] + ss['lockfiles'] = [] + ss['lockfiles-shared'] = [] + return ss + +def sstate_state_fromvars(d, task = None): + if task is None: + task = d.getVar('BB_CURRENTTASK', True) + if not task: + bb.fatal("sstate code running without task context?!") + task = task.replace("_setscene", "") + + if task.startswith("do_"): + task = task[3:] + inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split() + outputs = (d.getVarFlag("do_" + 
task, 'sstate-outputdirs', True) or "").split() + plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split() + lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split() + lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split() + interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split() + if not task or len(inputs) != len(outputs): + bb.fatal("sstate variables not setup correctly?!") + + if task == "populate_lic": + d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}") + d.setVar("SSTATE_EXTRAPATH", "") + + ss = sstate_init(task, d) + for i in range(len(inputs)): + sstate_add(ss, inputs[i], outputs[i], d) + ss['lockfiles'] = lockfiles + ss['lockfiles-shared'] = lockfilesshared + ss['plaindirs'] = plaindirs + ss['interceptfuncs'] = interceptfuncs + return ss + +def sstate_add(ss, source, dest, d): + if not source.endswith("/"): + source = source + "/" + if not dest.endswith("/"): + dest = dest + "/" + source = os.path.normpath(source) + dest = os.path.normpath(dest) + srcbase = os.path.basename(source) + ss['dirs'].append([srcbase, source, dest]) + return ss + +def sstate_install(ss, d): + import oe.path + import subprocess + + sharedfiles = [] + shareddirs = [] + bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}")) + + d2 = d.createCopy() + extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True) + if extrainf: + d2.setVar("SSTATE_MANMACH", extrainf) + manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task']) + + if os.access(manifest, os.R_OK): + bb.fatal("Package already staged (%s)?!" % manifest) + + locks = [] + for lock in ss['lockfiles-shared']: + locks.append(bb.utils.lockfile(lock, True)) + for lock in ss['lockfiles']: + locks.append(bb.utils.lockfile(lock)) + + for state in ss['dirs']: + bb.debug(2, "Staging files from %s to %s" % (state[1], state[2])) + for walkroot, dirs, files in os.walk(state[1]): + for file in files: + srcpath = os.path.join(walkroot, file) + dstpath = srcpath.replace(state[1], state[2]) + #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath)) + sharedfiles.append(dstpath) + for dir in dirs: + srcdir = os.path.join(walkroot, dir) + dstdir = srcdir.replace(state[1], state[2]) + #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir)) + if not dstdir.endswith("/"): + dstdir = dstdir + "/" + shareddirs.append(dstdir) + + # Check the file list for conflicts against files which already exist + whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split() + match = [] + for f in sharedfiles: + if os.path.exists(f): + f = os.path.normpath(f) + realmatch = True + for w in whitelist: + if f.startswith(w): + realmatch = False + break + if realmatch: + match.append(f) + sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}")) + search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] + if search_output != "": + match.append("Matched in %s" % search_output.rstrip()) + if match: + bb.error("The recipe %s is trying to install files into a shared " \ + "area when those files already exist. Those files and their manifest " \ + "location are:\n %s\nPlease verify which recipe should provide the " \ + "above files.\nThe build has stopped as continuing in this scenario WILL " \ + "break things, if not now, possibly in the future (we've seen builds fail " \ + "several months later). 
If the system knew how to recover from this " \ + "automatically it would however there are several different scenarios " \ + "which can result in this and we don't know which one this is. It may be " \ + "you have switched providers of something like virtual/kernel (e.g. from " \ + "linux-yocto to linux-yocto-dev), in that case you need to execute the " \ + "clean task for both recipes and it will resolve this error. It may be " \ + "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \ + "those recipes should again resolve this error however switching " \ + "DISTRO_FEATURES on an existing build directory is not supported, you " \ + "should really clean out tmp and rebuild (reusing sstate should be safe). " \ + "It could be the overlapping files detected are harmless in which case " \ + "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \ + "also be your build is including two different conflicting versions of " \ + "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \ + "be to resolve the conflict. If in doubt, please ask on the mailing list, " \ + "sharing the error and filelist above." % \ + (d.getVar('PN', True), "\n ".join(match))) + bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.") + + # Write out the manifest + f = open(manifest, "w") + for file in sharedfiles: + f.write(file + "\n") + + # We want to ensure that directories appear at the end of the manifest + # so that when we test to see if they should be deleted any contents + # added by the task will have been removed first. + dirs = sorted(shareddirs, key=len) + # Must remove children first, which will have a longer path than the parent + for di in reversed(dirs): + f.write(di + "\n") + f.close() + + # Run the actual file install + for state in ss['dirs']: + if os.path.exists(state[1]): + oe.path.copyhardlinktree(state[1], state[2]) + + for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split(): + bb.build.exec_func(postinst, d) + + for lock in locks: + bb.utils.unlockfile(lock) + +sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANFILEPREFIX" +sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}" + +def sstate_installpkg(ss, d): + import oe.path + import subprocess + + def prepdir(dir): + # remove dir if it exists, ensure any parent directories do exist + if os.path.exists(dir): + oe.path.remove(dir) + bb.utils.mkdirhier(dir) + oe.path.remove(dir) + + sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task']) + sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz" + sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz" + + if not os.path.exists(sstatepkg): + pstaging_fetch(sstatefetch, sstatepkg, d) + + if not os.path.isfile(sstatepkg): + bb.note("Staging package %s does not exist" % sstatepkg) + return False + + sstate_clean(ss, d) + + d.setVar('SSTATE_INSTDIR', sstateinst) + d.setVar('SSTATE_PKG', sstatepkg) + + for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split(): + bb.build.exec_func(f, d) + + for state in ss['dirs']: + prepdir(state[1]) + os.rename(sstateinst + state[0], state[1]) + sstate_install(ss, d) + + for plain in ss['plaindirs']: + workdir = d.getVar('WORKDIR', True) + src = sstateinst + "/" + plain.replace(workdir, '') + dest 
= plain + bb.utils.mkdirhier(src) + prepdir(dest) + os.rename(src, dest) + + return True + +python sstate_hardcode_path_unpack () { + # Fixup hardcoded paths + # + # Note: The logic below must match the reverse logic in + # sstate_hardcode_path(d) + import subprocess + + sstateinst = d.getVar('SSTATE_INSTDIR', True) + fixmefn = sstateinst + "fixmepath" + if os.path.isfile(fixmefn): + staging = d.getVar('STAGING_DIR', True) + staging_target = d.getVar('STAGING_DIR_TARGET', True) + staging_host = d.getVar('STAGING_DIR_HOST', True) + + if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d): + sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging) + elif bb.data.inherits_class('cross', d): + sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging) + else: + sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host) + + extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or '' + for fixmevar in extra_staging_fixmes.split(): + fixme_path = d.getVar(fixmevar, True) + sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path) + + # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed + sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd) + + bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd)) + subprocess.call(sstate_hardcode_cmd, shell=True) + + # Need to remove this or we'd copy it into the target directory and may + # conflict with another writer + os.remove(fixmefn) +} + +def sstate_clean_cachefile(ss, d): + import oe.path + + sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*" + bb.note("Removing %s" % sstatepkgfile) + oe.path.remove(sstatepkgfile) + +def sstate_clean_cachefiles(d): + for task in (d.getVar('SSTATETASKS', True) or "").split(): + ld = d.createCopy() + ss = sstate_state_fromvars(ld, task) + sstate_clean_cachefile(ss, ld) + +def sstate_clean_manifest(manifest, d): + import oe.path + + mfile = open(manifest) + entries = mfile.readlines() + mfile.close() + + for entry in entries: + entry = entry.strip() + bb.debug(2, "Removing manifest: %s" % entry) + # We can race against another package populating directories as we're removing them + # so we ignore errors here. + try: + if entry.endswith("/"): + if os.path.islink(entry[:-1]): + os.remove(entry[:-1]) + elif os.path.exists(entry) and len(os.listdir(entry)) == 0: + os.rmdir(entry[:-1]) + else: + oe.path.remove(entry) + except OSError: + pass + + oe.path.remove(manifest) + +def sstate_clean(ss, d): + import oe.path + import glob + + d2 = d.createCopy() + stamp_clean = d.getVar("STAMPCLEAN", True) + extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True) + if extrainf: + d2.setVar("SSTATE_MANMACH", extrainf) + wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf) + else: + wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task']) + + manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task']) + + if os.path.exists(manifest): + locks = [] + for lock in ss['lockfiles-shared']: + locks.append(bb.utils.lockfile(lock)) + for lock in ss['lockfiles']: + locks.append(bb.utils.lockfile(lock)) + + sstate_clean_manifest(manifest, d) + + for lock in locks: + bb.utils.unlockfile(lock) + + # Remove the current and previous stamps, but keep the sigdata. 
+ # + # The glob() matches do_task* which may match multiple tasks, for + # example: do_package and do_package_write_ipk, so we need to + # exactly match *.do_task.* and *.do_task_setscene.* + rm_stamp = '.do_%s.' % ss['task'] + rm_setscene = '.do_%s_setscene.' % ss['task'] + # For BB_SIGNATURE_HANDLER = "noop" + rm_nohash = ".do_%s" % ss['task'] + for stfile in glob.glob(wildcard_stfile): + # Keep the sigdata + if ".sigdata." in stfile: + continue + # Preserve taint files in the stamps directory + if stfile.endswith('.taint'): + continue + if rm_stamp in stfile or rm_setscene in stfile or \ + stfile.endswith(rm_nohash): + oe.path.remove(stfile) + +sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX" + +CLEANFUNCS += "sstate_cleanall" + +python sstate_cleanall() { + bb.note("Removing shared state for package %s" % d.getVar('PN', True)) + + manifest_dir = d.getVar('SSTATE_MANIFESTS', True) + if not os.path.exists(manifest_dir): + return + + tasks = d.getVar('SSTATETASKS', True).split() + for name in tasks: + ld = d.createCopy() + shared_state = sstate_state_fromvars(ld, name) + sstate_clean(shared_state, ld) +} + +python sstate_hardcode_path () { + import subprocess, platform + + # Need to remove hardcoded paths and fix these when we install the + # staging packages. + # + # Note: the logic in this function needs to match the reverse logic + # in sstate_installpkg(ss, d) + + staging = d.getVar('STAGING_DIR', True) + staging_target = d.getVar('STAGING_DIR_TARGET', True) + staging_host = d.getVar('STAGING_DIR_HOST', True) + sstate_builddir = d.getVar('SSTATE_BUILDDIR', True) + + if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d): + sstate_grep_cmd = "grep -l -e '%s'" % (staging) + sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging) + elif bb.data.inherits_class('cross', d): + sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging) + sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging) + else: + sstate_grep_cmd = "grep -l -e '%s'" % (staging_host) + sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host) + + extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or '' + for fixmevar in extra_staging_fixmes.split(): + fixme_path = d.getVar(fixmevar, True) + sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar) + + fixmefn = sstate_builddir + "fixmepath" + + sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True) + sstate_filelist_cmd = "tee %s" % (fixmefn) + + # fixmepath file needs relative paths, drop sstate_builddir prefix + sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn) + + xargs_no_empty_run_cmd = '--no-run-if-empty' + if platform.system() == 'Darwin': + xargs_no_empty_run_cmd = '' + + # Limit the fixpaths and sed operations based on the initial grep search + # This has the side effect of making sure the vfs cache is hot + sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd) + + bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd)) + subprocess.call(sstate_hardcode_cmd, shell=True) + + # If the fixmefn is empty, remove it.. 
+ if os.stat(fixmefn).st_size == 0: + os.remove(fixmefn) + else: + bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd)) + subprocess.call(sstate_filelist_relative_cmd, shell=True) +} + +def sstate_package(ss, d): + import oe.path + + def make_relative_symlink(path, outputpath, d): + # Replace out absolute TMPDIR paths in symlinks with relative ones + if not os.path.islink(path): + return + link = os.readlink(path) + if not os.path.isabs(link): + return + if not link.startswith(tmpdir): + return + + depth = outputpath.rpartition(tmpdir)[2].count('/') + base = link.partition(tmpdir)[2].strip() + while depth > 1: + base = "/.." + base + depth -= 1 + base = "." + base + + bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath)) + os.remove(path) + os.symlink(base, path) + + tmpdir = d.getVar('TMPDIR', True) + + sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task']) + sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['task'] + ".tgz" + bb.utils.remove(sstatebuild, recurse=True) + bb.utils.mkdirhier(sstatebuild) + bb.utils.mkdirhier(os.path.dirname(sstatepkg)) + for state in ss['dirs']: + if not os.path.exists(state[1]): + continue + srcbase = state[0].rstrip("/").rsplit('/', 1)[0] + for walkroot, dirs, files in os.walk(state[1]): + for file in files: + srcpath = os.path.join(walkroot, file) + dstpath = srcpath.replace(state[1], state[2]) + make_relative_symlink(srcpath, dstpath, d) + for dir in dirs: + srcpath = os.path.join(walkroot, dir) + dstpath = srcpath.replace(state[1], state[2]) + make_relative_symlink(srcpath, dstpath, d) + bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0])) + oe.path.copyhardlinktree(state[1], sstatebuild + state[0]) + + workdir = d.getVar('WORKDIR', True) + for plain in ss['plaindirs']: + pdir = plain.replace(workdir, sstatebuild) + bb.utils.mkdirhier(plain) + bb.utils.mkdirhier(pdir) + oe.path.copyhardlinktree(plain, pdir) + + d.setVar('SSTATE_BUILDDIR', sstatebuild) + d.setVar('SSTATE_PKG', sstatepkg) + + for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \ + (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split(): + bb.build.exec_func(f, d) + + bb.siggen.dump_this_task(sstatepkg + ".siginfo", d) + + return + +def pstaging_fetch(sstatefetch, sstatepkg, d): + import bb.fetch2 + + # Only try and fetch if the user has configured a mirror + mirrors = d.getVar('SSTATE_MIRRORS', True) + if not mirrors: + return + + # Copy the data object and override DL_DIR and SRC_URI + localdata = bb.data.createCopy(d) + bb.data.update_data(localdata) + + dldir = localdata.expand("${SSTATE_DIR}") + bb.utils.mkdirhier(dldir) + + localdata.delVar('MIRRORS') + localdata.delVar('FILESPATH') + localdata.setVar('DL_DIR', dldir) + localdata.setVar('PREMIRRORS', mirrors) + + # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK, + # we'll want to allow network access for the current set of fetches. 
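+    # Illustrative local.conf fragment (an assumption, not part of this class)
+    # that exercises this branch:
+    #   BB_NO_NETWORK = "1"
+    #   SSTATE_MIRROR_ALLOW_NETWORK = "1"
+    #   SSTATE_MIRRORS = "file://.* http://sstate.example.com/PATH"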
+ if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1": + localdata.delVar('BB_NO_NETWORK') + + # Try a fetch from the sstate mirror, if it fails just return and + # we will build the package + for srcuri in ['file://{0}'.format(sstatefetch), + 'file://{0}.siginfo'.format(sstatefetch)]: + localdata.setVar('SRC_URI', srcuri) + try: + fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False) + fetcher.download() + + # Need to optimise this, if using file:// urls, the fetcher just changes the local path + # For now work around by symlinking + localpath = bb.data.expand(fetcher.localpath(srcuri), localdata) + if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg): + os.symlink(localpath, sstatepkg) + + except bb.fetch2.BBFetchException: + break + +def sstate_setscene(d): + shared_state = sstate_state_fromvars(d) + accelerate = sstate_installpkg(shared_state, d) + if not accelerate: + raise bb.build.FuncFailed("No suitable staging package found") + +python sstate_task_prefunc () { + shared_state = sstate_state_fromvars(d) + sstate_clean(shared_state, d) +} + +python sstate_task_postfunc () { + shared_state = sstate_state_fromvars(d) + sstate_install(shared_state, d) + for intercept in shared_state['interceptfuncs']: + bb.build.exec_func(intercept, d) + omask = os.umask(002) + if omask != 002: + bb.note("Using umask 002 (not %0o) for sstate packaging" % omask) + sstate_package(shared_state, d) + os.umask(omask) +} + + +# +# Shell function to generate a sstate package from a directory +# set as SSTATE_BUILDDIR +# +sstate_create_package () { + cd ${SSTATE_BUILDDIR} + TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX` + # Need to handle empty directories + if [ "$(ls -A)" ]; then + set +e + tar -czf $TFILE * + ret=$? + if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then + exit 1 + fi + set -e + else + tar -cz --file=$TFILE --files-from=/dev/null + fi + chmod 0664 $TFILE + mv -f $TFILE ${SSTATE_PKG} + + cd ${WORKDIR} + rm -rf ${SSTATE_BUILDDIR} +} + +# +# Shell function to decompress and prepare a package for installation +# +sstate_unpack_package () { + mkdir -p ${SSTATE_INSTDIR} + cd ${SSTATE_INSTDIR} + tar -xmvzf ${SSTATE_PKG} + # Use "! -w ||" to return true for read only files + [ ! 
-w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG} +} + +BB_HASHCHECK_FUNCTION = "sstate_checkhashes" + +def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d): + + ret = [] + missed = [] + + def getpathcomponents(task, d): + # Magic data from BB_HASHFILENAME + splithashfn = sq_hashfn[task].split(" ") + spec = splithashfn[1] + extrapath = splithashfn[0] + + tname = sq_task[task][3:] + + if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]: + spec = splithashfn[2] + extrapath = "" + + return spec, extrapath, tname + + + for task in range(len(sq_fn)): + + spec, extrapath, tname = getpathcomponents(task, d) + + sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz.siginfo") + + if os.path.exists(sstatefile): + bb.debug(2, "SState: Found valid sstate file %s" % sstatefile) + ret.append(task) + continue + else: + missed.append(task) + bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile) + + mirrors = d.getVar("SSTATE_MIRRORS", True) + if mirrors: + # Copy the data object and override DL_DIR and SRC_URI + localdata = bb.data.createCopy(d) + bb.data.update_data(localdata) + + dldir = localdata.expand("${SSTATE_DIR}") + localdata.setVar('DL_DIR', dldir) + localdata.setVar('PREMIRRORS', mirrors) + + bb.debug(2, "SState using premirror of: %s" % mirrors) + + # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK, + # we'll want to allow network access for the current set of fetches. + if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1": + localdata.delVar('BB_NO_NETWORK') + + for task in range(len(sq_fn)): + if task in ret: + continue + + spec, extrapath, tname = getpathcomponents(task, d) + + sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz.siginfo") + + srcuri = "file://" + sstatefile + localdata.setVar('SRC_URI', srcuri) + bb.debug(2, "SState: Attempting to fetch %s" % srcuri) + + try: + fetcher = bb.fetch2.Fetch(srcuri.split(), localdata) + fetcher.checkstatus() + bb.debug(2, "SState: Successful fetch test for %s" % srcuri) + ret.append(task) + if task in missed: + missed.remove(task) + except: + missed.append(task) + bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri) + pass + + inheritlist = d.getVar("INHERIT", True) + if "toaster" in inheritlist: + evdata = {'missed': [], 'found': []}; + for task in missed: + spec, extrapath, tname = getpathcomponents(task, d) + sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz") + evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) ) + for task in ret: + spec, extrapath, tname = getpathcomponents(task, d) + sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz") + evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) ) + bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d) + + if hasattr(bb.parse.siggen, "checkhashes"): + bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d) + + return ret + +BB_SETSCENE_DEPVALID = "setscene_depvalid" + +def setscene_depvalid(task, taskdependees, notneeded, d): + # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME] + # task is included in taskdependees too + + bb.debug(2, "Considering setscene task: %s" % 
(str(taskdependees[task]))) + + def isNativeCross(x): + return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x + + def isPostInstDep(x): + if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-update-icon-cache-native"]: + return True + return False + + # We only need to trigger populate_lic through direct dependencies + if taskdependees[task][1] == "do_populate_lic": + return True + + for dep in taskdependees: + bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep]))) + if task == dep: + continue + if dep in notneeded: + continue + # do_package_write_* and do_package doesn't need do_package + if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']: + continue + # do_package_write_* and do_package doesn't need do_populate_sysroot, unless is a postinstall dependency + if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']: + if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']: + return False + continue + # Native/Cross packages don't exist and are noexec anyway + if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']: + continue + + # Consider sysroot depending on sysroot tasks + if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot': + # base-passwd/shadow-sysroot don't need their dependencies + if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")): + continue + # Nothing need depend on libc-initial/gcc-cross-initial + if "-initial" in taskdependees[task][0]: + continue + # Native/Cross populate_sysroot need their dependencies + if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]): + return False + # Target populate_sysroot depended on by cross tools need to be installed + if isNativeCross(taskdependees[dep][0]): + return False + # Native/cross tools depended upon by target sysroot are not needed + if isNativeCross(taskdependees[task][0]): + continue + # Target populate_sysroot need their dependencies + return False + + # This is due to the [depends] in useradd.bbclass complicating matters + # The logic *is* reversed here due to the way hard setscene dependencies are injected + if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot': + continue + + # Safe fallthrough default + bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep]))) + return False + return True + +addhandler sstate_eventhandler +sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded" +python sstate_eventhandler() { + d = e.data + # When we write an sstate package we rewrite the SSTATE_PKG + spkg = d.getVar('SSTATE_PKG', True) + if not spkg.endswith(".tgz"): + taskname = d.getVar("BB_RUNTASK", True)[3:] + spec = d.getVar('SSTATE_PKGSPEC', True) + swspec = d.getVar('SSTATE_SWSPEC', True) + if taskname in ["fetch", "unpack", "patch", "populate_lic", 
"preconfigure"] and swspec: + d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}") + d.setVar("SSTATE_EXTRAPATH", "") + sstatepkg = d.getVar('SSTATE_PKG', True) + bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d) +} + diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass new file mode 100644 index 0000000000..57b2743196 --- /dev/null +++ b/meta/classes/staging.bbclass @@ -0,0 +1,122 @@ + +sysroot_stage_dir() { + src="$1" + dest="$2" + # if the src doesn't exist don't do anything + if [ ! -d "$src" ]; then + return + fi + + mkdir -p "$dest" + ( + cd $src + find . -print0 | cpio --null -pdlu $dest + ) +} + +sysroot_stage_libdir() { + src="$1" + dest="$2" + + sysroot_stage_dir $src $dest +} + +sysroot_stage_dirs() { + from="$1" + to="$2" + + sysroot_stage_dir $from${includedir} $to${includedir} + if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then + sysroot_stage_dir $from${bindir} $to${bindir} + sysroot_stage_dir $from${sbindir} $to${sbindir} + sysroot_stage_dir $from${base_bindir} $to${base_bindir} + sysroot_stage_dir $from${base_sbindir} $to${base_sbindir} + sysroot_stage_dir $from${libexecdir} $to${libexecdir} + sysroot_stage_dir $from${sysconfdir} $to${sysconfdir} + sysroot_stage_dir $from${localstatedir} $to${localstatedir} + fi + if [ -d $from${libdir} ] + then + sysroot_stage_libdir $from${libdir} $to${libdir} + fi + if [ -d $from${base_libdir} ] + then + sysroot_stage_libdir $from${base_libdir} $to${base_libdir} + fi + if [ -d $from${nonarch_base_libdir} ] + then + sysroot_stage_libdir $from${nonarch_base_libdir} $to${nonarch_base_libdir} + fi + sysroot_stage_dir $from${datadir} $to${datadir} + # We don't care about docs/info/manpages/locales + rm -rf $to${mandir}/ $to${docdir}/ $to${infodir}/ ${to}${datadir}/locale/ + rm -rf $to${datadir}/applications/ $to${datadir}/fonts/ $to${datadir}/pixmaps/ +} + +sysroot_stage_all() { + sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR} +} + +do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}" +do_populate_sysroot[umask] = "022" + +addtask populate_sysroot after do_install + +SYSROOT_PREPROCESS_FUNCS ?= "" +SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/" +SYSROOT_LOCK = "${STAGING_DIR}/staging.lock" + +# We clean out any existing sstate from the sysroot if we rerun configure +python sysroot_cleansstate () { + ss = sstate_state_fromvars(d, "populate_sysroot") + sstate_clean(ss, d) +} +do_configure[prefuncs] += "sysroot_cleansstate" + + +BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes" + +def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None): + problems = set() + configurefnids = set() + if not invalidtasks: + invalidtasks = xrange(len(tasknames)) + for task in invalidtasks: + if tasknames[task] == "do_configure" and task not in covered: + configurefnids.add(fnids[task]) + for task in covered: + if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids: + problems.add(task) + return problems + +python do_populate_sysroot () { + bb.build.exec_func("sysroot_stage_all", d) + for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split(): + bb.build.exec_func(f, d) + pn = d.getVar("PN", True) + multiprov = d.getVar("MULTI_PROVIDER_WHITELIST", True).split() + provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/") + bb.utils.mkdirhier(provdir) + for p in d.getVar("PROVIDES", True).split(): + if p in multiprov: + continue + p = p.replace("/", "_") + with open(provdir + p, "w") as f: + f.write(pn) +} + +do_populate_sysroot[vardeps] += 
"${SYSROOT_PREPROCESS_FUNCS}" +do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST" + +SSTATETASKS += "do_populate_sysroot" +do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}" +do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}" +do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/" +do_populate_sysroot[stamp-extra-info] = "${MACHINE}" + +python do_populate_sysroot_setscene () { + sstate_setscene(d) +} +addtask do_populate_sysroot_setscene + + diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass new file mode 100644 index 0000000000..d6498d98bb --- /dev/null +++ b/meta/classes/syslinux.bbclass @@ -0,0 +1,187 @@ +# syslinux.bbclass +# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved +# Released under the MIT license (see packages/COPYING) + +# Provide syslinux specific functions for building bootable images. + +# External variables +# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional) +# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) +# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu +# ${LABELS} - a list of targets for the automatic config +# ${APPEND} - an override list of append strings for each label +# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited +# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu +# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console +# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string +# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument +# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments + +do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \ + syslinux-native:do_populate_sysroot" + +SYSLINUXCFG = "${S}/syslinux.cfg" + +ISOLINUXDIR = "/isolinux" +SYSLINUXDIR = "/" +# The kernel has an internal default console, which you can override with +# a console=...some_tty... 
+SYSLINUX_DEFAULT_CONSOLE ?= "" +SYSLINUX_SERIAL ?= "0 115200" +SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200" +ISO_BOOTIMG = "isolinux/isolinux.bin" +ISO_BOOTCAT = "isolinux/boot.cat" +MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table" +APPEND_prepend = " ${SYSLINUX_ROOT} " + +syslinux_populate() { + DEST=$1 + BOOTDIR=$2 + CFGNAME=$3 + + install -d ${DEST}${BOOTDIR} + + # Install the config files + install -m 0644 ${SYSLINUXCFG} ${DEST}${BOOTDIR}/${CFGNAME} + if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then + install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32 + install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32 + install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32 + if [ "${SYSLINUX_SPLASH}" != "" ] ; then + install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss + fi + fi +} + +syslinux_iso_populate() { + iso_dir=$1 + syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg + install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR} + install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR} +} + +syslinux_hddimg_populate() { + hdd_dir=$1 + syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg + install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys +} + +syslinux_hddimg_install() { + syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg +} + +syslinux_hdddirect_install() { + DEST=$1 + syslinux $DEST +} + +python build_syslinux_cfg () { + import copy + import sys + + workdir = d.getVar('WORKDIR', True) + if not workdir: + bb.error("WORKDIR not defined, unable to package") + return + + labels = d.getVar('LABELS', True) + if not labels: + bb.debug(1, "LABELS not defined, nothing to do") + return + + if labels == []: + bb.debug(1, "No labels, nothing to do") + return + + cfile = d.getVar('SYSLINUXCFG', True) + if not cfile: + raise bb.build.FuncFailed('Unable to read SYSLINUXCFG') + + try: + cfgfile = file(cfile, 'w') + except OSError: + raise bb.build.funcFailed('Unable to open %s' % (cfile)) + + cfgfile.write('# Automatically created by OE\n') + + opts = d.getVar('SYSLINUX_OPTS', True) + + if opts: + for opt in opts.split(';'): + cfgfile.write('%s\n' % opt) + + cfgfile.write('ALLOWOPTIONS 1\n'); + syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True) + syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True) + syslinux_serial = d.getVar('SYSLINUX_SERIAL', True) + if syslinux_serial: + cfgfile.write('SERIAL %s\n' % syslinux_serial) + + menu = d.getVar('AUTO_SYSLINUXMENU', True) + + if menu and syslinux_serial: + cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0])) + else: + cfgfile.write('DEFAULT %s\n' % (labels.split()[0])) + + timeout = d.getVar('SYSLINUX_TIMEOUT', True) + + if timeout: + cfgfile.write('TIMEOUT %s\n' % timeout) + else: + cfgfile.write('TIMEOUT 50\n') + + prompt = d.getVar('SYSLINUX_PROMPT', True) + if prompt: + cfgfile.write('PROMPT %s\n' % prompt) + else: + cfgfile.write('PROMPT 1\n') + + if menu: + cfgfile.write('ui vesamenu.c32\n') + cfgfile.write('menu title Select kernel options and boot kernel\n') + cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n') + splash = d.getVar('SYSLINUX_SPLASH', True) + if splash: + cfgfile.write('menu background splash.lss\n') + + for label in labels.split(): + localdata = bb.data.createCopy(d) + + overrides = localdata.getVar('OVERRIDES', True) + if not overrides: + raise 
bb.build.FuncFailed('OVERRIDES not defined') + + localdata.setVar('OVERRIDES', label + ':' + overrides) + bb.data.update_data(localdata) + + btypes = [ [ "", syslinux_default_console ] ] + if menu and syslinux_serial: + btypes = [ [ "Graphics console ", syslinux_default_console ], + [ "Serial console ", syslinux_serial_tty ] ] + + for btype in btypes: + cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label)) + + exargs = d.getVar('SYSLINUX_KERNEL_ARGS', True) + if exargs: + btype[1] += " " + exargs + + append = localdata.getVar('APPEND', True) + initrd = localdata.getVar('INITRD', True) + + if append: + cfgfile.write('APPEND ') + + if initrd: + cfgfile.write('initrd=/initrd ') + + cfgfile.write('LABEL=%s '% (label)) + + cfgfile.write('%s %s\n' % (append, btype[1])) + else: + cfgfile.write('APPEND %s\n' % btype[1]) + + cfgfile.close() +} +build_syslinux_cfg[vardeps] += "APPEND" diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass new file mode 100644 index 0000000000..c34884bd38 --- /dev/null +++ b/meta/classes/systemd.bbclass @@ -0,0 +1,197 @@ +# The list of packages that should have systemd packaging scripts added. For +# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service +# files in this package. If this variable isn't set, [package].service is used. +SYSTEMD_PACKAGES ?= "${PN}" +SYSTEMD_PACKAGES_class-native ?= "" +SYSTEMD_PACKAGES_class-nativesdk ?= "" + +# Whether to enable or disable the services on installation. +SYSTEMD_AUTO_ENABLE ??= "enable" + +# This class will be included in any recipe that supports systemd init scripts, +# even if systemd is not in DISTRO_FEATURES. As such don't make any changes +# directly but check the DISTRO_FEATURES first. +python __anonymous() { + # If the distro features have systemd but not sysvinit, inhibit update-rcd + # from doing any work so that pure-systemd images don't have redundant init + # files. 
+ if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d): + d.appendVar("DEPENDS", " systemd-systemctl-native") + if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d): + d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1") +} + +systemd_postinst() { +OPTS="" + +if [ -n "$D" ]; then + OPTS="--root=$D" +fi + +if type systemctl >/dev/null 2>/dev/null; then + systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE} + + if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then + systemctl restart ${SYSTEMD_SERVICE} + fi +fi +} + +systemd_prerm() { +OPTS="" + +if [ -n "$D" ]; then + OPTS="--root=$D" +fi + +if type systemctl >/dev/null 2>/dev/null; then + if [ -z "$D" ]; then + systemctl stop ${SYSTEMD_SERVICE} + fi + + systemctl $OPTS disable ${SYSTEMD_SERVICE} +fi +} + + +systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst" +systemd_populate_packages[vardepsexclude] += "OVERRIDES" + + +python systemd_populate_packages() { + if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d): + return + + def get_package_var(d, var, pkg): + val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip() + if val == "": + val = (d.getVar(var, True) or "").strip() + return val + + # Check if systemd-packages already included in PACKAGES + def systemd_check_package(pkg_systemd): + packages = d.getVar('PACKAGES', True) + if not pkg_systemd in packages.split(): + bb.error('%s does not appear in package list, please add it' % pkg_systemd) + + + def systemd_generate_package_scripts(pkg): + bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg) + + # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg + # variable. + localdata = d.createCopy() + localdata.prependVar("OVERRIDES", pkg + ":") + bb.data.update_data(localdata) + + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += localdata.getVar('systemd_postinst', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + prerm = d.getVar('pkg_prerm_%s' % pkg, True) + if not prerm: + prerm = '#!/bin/sh\n' + prerm += localdata.getVar('systemd_prerm', True) + d.setVar('pkg_prerm_%s' % pkg, prerm) + + + # Add files to FILES_*-systemd if existent and not already done + def systemd_append_file(pkg_systemd, file_append): + appended = False + if os.path.exists(oe.path.join(d.getVar("D", True), file_append)): + var_name = "FILES_" + pkg_systemd + files = d.getVar(var_name, False) or "" + if file_append not in files.split(): + d.appendVar(var_name, " " + file_append) + appended = True + return appended + + # Add systemd files to FILES_*-systemd, parse for Also= and follow recursive + def systemd_add_files_and_parse(pkg_systemd, path, service, keys): + # avoid infinite recursion + if systemd_append_file(pkg_systemd, oe.path.join(path, service)): + fullpath = oe.path.join(d.getVar("D", True), path, service) + if service.find('.service') != -1: + # for *.service add *@.service + service_base = service.replace('.service', '') + systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys) + if service.find('.socket') != -1: + # for *.socket add *.service and *@.service + service_base = service.replace('.socket', '') + systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys) + systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys) + for key in keys.split(): + # recurse all dependencies found in keys ('Also';'Conflicts';..) 
and add to files + cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key) + pipe = os.popen(cmd, 'r') + line = pipe.readline() + while line: + line = line.replace('\n', '') + systemd_add_files_and_parse(pkg_systemd, path, line, keys) + line = pipe.readline() + pipe.close() + + # Check service-files and call systemd_add_files_and_parse for each entry + def systemd_check_services(): + searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),] + searchpaths.append(oe.path.join(d.getVar("nonarch_base_libdir", True), "systemd", "system")) + searchpaths.append(oe.path.join(d.getVar("exec_prefix", True), d.getVar("nonarch_base_libdir", True), "systemd", "system")) + systemd_packages = d.getVar('SYSTEMD_PACKAGES', True) + has_exactly_one_service = len(systemd_packages.split()) == 1 + if has_exactly_one_service: + has_exactly_one_service = len(get_package_var(d, 'SYSTEMD_SERVICE', systemd_packages).split()) == 1 + + keys = 'Also' + # scan for all in SYSTEMD_SERVICE[] + for pkg_systemd in systemd_packages.split(): + for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split(): + path_found = '' + for path in searchpaths: + if os.path.exists(oe.path.join(d.getVar("D", True), path, service)): + path_found = path + break + if path_found != '': + systemd_add_files_and_parse(pkg_systemd, path_found, service, keys) + else: + raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \ + (pkg_systemd, service)) + + # Run all modifications once when creating package + if os.path.exists(d.getVar("D", True)): + for pkg in d.getVar('SYSTEMD_PACKAGES', True).split(): + systemd_check_package(pkg) + if d.getVar('SYSTEMD_SERVICE_' + pkg, True): + systemd_generate_package_scripts(pkg) + systemd_check_services() +} + +PACKAGESPLITFUNCS_prepend = "systemd_populate_packages " + +python rm_systemd_unitdir (){ + import shutil + if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d): + systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True)) + if os.path.exists(systemd_unitdir): + shutil.rmtree(systemd_unitdir) + systemd_libdir = os.path.dirname(systemd_unitdir) + if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)): + os.rmdir(systemd_libdir) +} +do_install[postfuncs] += "rm_systemd_unitdir " + +python rm_sysvinit_initddir (){ + import shutil + sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d")) + + if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \ + not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \ + os.path.exists(sysv_initddir): + systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True), "system") + + # If systemd_unitdir contains anything, delete sysv_initddir + if (os.path.exists(systemd_unitdir) and os.listdir(systemd_unitdir)): + shutil.rmtree(sysv_initddir) +} +do_install[postfuncs] += "rm_sysvinit_initddir " diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass new file mode 100644 index 0000000000..e577c6d594 --- /dev/null +++ b/meta/classes/terminal.bbclass @@ -0,0 +1,94 @@ +OE_TERMINAL ?= 'auto' +OE_TERMINAL[type] = 'choice' +OE_TERMINAL[choices] = 'auto none \ + ${@" ".join(o.name \ + for o in oe.terminal.prioritized())}' + +OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE' +OE_TERMINAL_EXPORTS[type] = 'list' + +XAUTHORITY ?= "${HOME}/.Xauthority" +SHELL ?= "bash" + + +def emit_terminal_func(command, envdata, d): + cmd_func = 'do_terminal' + + 
envdata.setVar(cmd_func, 'exec ' + command) + envdata.setVarFlag(cmd_func, 'func', 1) + + runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}" + runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid()) + runfile = os.path.join(d.getVar('T', True), runfile) + bb.utils.mkdirhier(os.path.dirname(runfile)) + + with open(runfile, 'w') as script: + script.write('#!/bin/sh -e\n') + bb.data.emit_func(cmd_func, script, envdata) + script.write(cmd_func) + script.write("\n") + os.chmod(runfile, 0755) + + return runfile + +def oe_terminal(command, title, d): + import oe.data + import oe.terminal + + envdata = bb.data.init() + + for v in os.environ: + envdata.setVar(v, os.environ[v]) + envdata.setVarFlag(v, 'export', 1) + + for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d): + value = d.getVar(export, True) + if value is not None: + os.environ[export] = str(value) + envdata.setVar(export, str(value)) + envdata.setVarFlag(export, 'export', 1) + if export == "PSEUDO_DISABLED": + if "PSEUDO_UNLOAD" in os.environ: + del os.environ["PSEUDO_UNLOAD"] + envdata.delVar("PSEUDO_UNLOAD") + + # Add in all variables from the user's original environment which + # haven't subsequntly been set/changed + origbbenv = d.getVar("BB_ORIGENV", False) or {} + for key in origbbenv: + if key in envdata: + continue + value = origbbenv.getVar(key, True) + if value is not None: + os.environ[key] = str(value) + envdata.setVar(key, str(value)) + envdata.setVarFlag(key, 'export', 1) + + # A complex PS1 might need more escaping of chars. + # Lets not export PS1 instead. + envdata.delVar("PS1") + + # Replace command with an executable wrapper script + command = emit_terminal_func(command, envdata, d) + + terminal = oe.data.typed_value('OE_TERMINAL', d).lower() + if terminal == 'none': + bb.fatal('Devshell usage disabled with OE_TERMINAL') + elif terminal != 'auto': + try: + oe.terminal.spawn(terminal, command, title, None, d) + return + except oe.terminal.UnsupportedTerminal: + bb.warn('Unsupported terminal "%s", defaulting to "auto"' % + terminal) + except oe.terminal.ExecutionError as exc: + bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc)) + + try: + oe.terminal.spawn_preferred(command, title, None, d) + except oe.terminal.NoSupportedTerminals: + bb.fatal('No valid terminal found, unable to open devshell') + except oe.terminal.ExecutionError as exc: + bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc)) + +oe_terminal[vardepsexclude] = "BB_ORIGENV" diff --git a/meta/classes/testimage-auto.bbclass b/meta/classes/testimage-auto.bbclass new file mode 100644 index 0000000000..860599d2b5 --- /dev/null +++ b/meta/classes/testimage-auto.bbclass @@ -0,0 +1,23 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + + +# Run tests automatically on an image after the image is constructed +# (as opposed to testimage.bbclass alone where tests must be called +# manually using bitbake -c testimage ). +# +# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to +# inherit it since that will be done in image.bbclass when this variable +# has been set. +# +# See testimage.bbclass for the test implementation. 
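# A minimal local.conf fragment for this automatic mode might therefore be
# (the appended suites are only an example):
#   TEST_IMAGE = "1"
#   TEST_SUITES_append = " ping ssh"
# With that set, do_testimage_auto below runs between do_rootfs and do_build,
# so every normal image build is also tested.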
+ +inherit testimage + +python do_testimage_auto() { + testimage_main(d) +} +addtask testimage_auto before do_build after do_rootfs +do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}" +do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}" diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass new file mode 100644 index 0000000000..683173854d --- /dev/null +++ b/meta/classes/testimage.bbclass @@ -0,0 +1,323 @@ +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) + + +# testimage.bbclass enables testing of qemu images using python unittests. +# Most of the tests are commands run on the target image over ssh. +# To use it add testimage to global inherit and call your target image with -c testimage +# You can try it out like this: +# - first build a qemu core-image-sato +# - add INHERIT += "testimage" in local.conf +# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests. + +# You can set (or append to) TEST_SUITES in local.conf to select the tests +# which you want to run for your target. +# The test names are the module names in meta/lib/oeqa/runtime. +# Each name in TEST_SUITES represents a required test for the image. (no skipping allowed) +# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own). +# Note that order in TEST_SUITES is important (it's the order tests run) and it influences test dependencies. +# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf. + +# TEST_LOG_DIR contains a command ssh log and may contain information about what command is running, output and return codes and for qemu a boot log till login. +# Booting is handled by this class, and it's not a test in itself. +# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
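# Putting the notes above together, a typical local.conf for interactive use
# could look like this (suite selection, timeout and log location are
# illustrative, not recommendations):
#   INHERIT += "testimage"
#   TEST_SUITES = "ping ssh df date auto"
#   TEST_QEMUBOOT_TIMEOUT = "1500"
#   TEST_LOG_DIR = "${TOPDIR}/testimage-logs"
# followed by "bitbake core-image-sato -c testimage" once the image exists.
# The assignments below are the defaults used for anything not set explicitly.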
+ +TEST_LOG_DIR ?= "${WORKDIR}/testimage" + +TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}" +TEST_EXPORT_ONLY ?= "0" + +DEFAULT_TEST_SUITES = "ping auto" +DEFAULT_TEST_SUITES_pn-core-image-minimal = "ping" +DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date rpm smart dmesg python parselogs" +DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc rpm smart kernelmodule dmesg python parselogs" +DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto" +TEST_SUITES ?= "${DEFAULT_TEST_SUITES}" + +TEST_QEMUBOOT_TIMEOUT ?= "1000" +TEST_TARGET ?= "qemu" +TEST_TARGET_IP ?= "" +TEST_SERVER_IP ?= "" + +TESTIMAGEDEPENDS = "" +TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot" + +TESTIMAGELOCK = "${TMPDIR}/testimage.lock" +TESTIMAGELOCK_qemuall = "" + +python do_testimage() { + testimage_main(d) +} +addtask testimage +do_testimage[nostamp] = "1" +do_testimage[depends] += "${TESTIMAGEDEPENDS}" +do_testimage[lockfiles] += "${TESTIMAGELOCK}" + +python do_testsdk() { + testsdk_main(d) +} +addtask testsdk +do_testsdk[nostamp] = "1" +do_testsdk[depends] += "${TESTIMAGEDEPENDS}" +do_testsdk[lockfiles] += "${TESTIMAGELOCK}" + +def get_tests_list(d, type="runtime"): + testsuites = d.getVar("TEST_SUITES", True).split() + bbpath = d.getVar("BBPATH", True).split(':') + + # This relies on lib/ under each directory in BBPATH being added to sys.path + # (as done by default in base.bbclass) + testslist = [] + for testname in testsuites: + if testname != "auto": + found = False + for p in bbpath: + if os.path.exists(os.path.join(p, 'lib', 'oeqa', type, testname + '.py')): + testslist.append("oeqa." + type + "." + testname) + found = True + break + if not found: + bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname) + + if "auto" in testsuites: + def add_auto_list(path): + if not os.path.exists(os.path.join(path, '__init__.py')): + bb.fatal('Tests directory %s exists but is missing __init__.py' % path) + files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')]) + for f in files: + module = 'oeqa.' + type + '.' 
+ f[:-3] + if module not in testslist: + testslist.append(module) + + for p in bbpath: + testpath = os.path.join(p, 'lib', 'oeqa', type) + bb.debug(2, 'Searching for tests in %s' % testpath) + if os.path.exists(testpath): + add_auto_list(testpath) + + return testslist + + +def exportTests(d,tc): + import json + import shutil + import pkgutil + + exportpath = d.getVar("TEST_EXPORT_DIR", True) + + savedata = {} + savedata["d"] = {} + savedata["target"] = {} + for key in tc.__dict__: + # special cases + if key != "d" and key != "target": + savedata[key] = getattr(tc, key) + savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True) + savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True) + + keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \ + and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func")] + for key in keys: + try: + savedata["d"][key] = d.getVar(key, True) + except bb.data_smart.ExpansionError: + # we don't care about those anyway + pass + + with open(os.path.join(exportpath, "testdata.json"), "w") as f: + json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True) + + # now start copying files + # we'll basically copy everything under meta/lib/oeqa, with these exceptions + # - oeqa/targetcontrol.py - not needed + # - oeqa/selftest - something else + # That means: + # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers) + # - the contents of oeqa/utils and oeqa/runtime/files + # - oeqa/oetest.py and oeqa/runexport.py (this will get copied to exportpath not exportpath/oeqa) + # - __init__.py files + bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files")) + bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils")) + # copy test modules, this should cover tests in other layers too + for t in tc.testslist: + mod = pkgutil.get_loader(t) + shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime")) + # copy __init__.py files + oeqadir = pkgutil.get_loader("oeqa").filename + shutil.copy2(os.path.join(oeqadir, "__init__.py"), os.path.join(exportpath, "oeqa")) + shutil.copy2(os.path.join(oeqadir, "runtime/__init__.py"), os.path.join(exportpath, "oeqa/runtime")) + # copy oeqa/oetest.py and oeqa/runexported.py + shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa")) + shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath) + # copy oeqa/utils/*.py + for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")): + for f in files: + if f.endswith(".py"): + shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils")) + # copy oeqa/runtime/files/* + for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")): + for f in files: + shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files")) + + bb.plain("Exported tests to: %s" % exportpath) + + +def testimage_main(d): + import unittest + import os + import oeqa.runtime + import time + from oeqa.oetest import loadTests, runTests + from oeqa.targetcontrol import get_target_controller + + pn = d.getVar("PN", True) + export = oe.utils.conditional("TEST_EXPORT_ONLY", "1", True, False, d) + bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True)) + if export: + bb.utils.remove(d.getVar("TEST_EXPORT_DIR", True), recurse=True) + bb.utils.mkdirhier(d.getVar("TEST_EXPORT_DIR", True)) + + # tests in TEST_SUITES become required tests + # they won't be skipped even if they aren't suitable for a 
image (like xorg for minimal) + # testslist is what we'll actually pass to the unittest loader + testslist = get_tests_list(d) + testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"] + + # the robot dance + target = get_target_controller(d) + + class TestContext(object): + def __init__(self): + self.d = d + self.testslist = testslist + self.testsrequired = testsrequired + self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files") + self.target = target + self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split() + self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split() + manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + ".manifest") + try: + with open(manifest) as f: + self.pkgmanifest = f.read() + except IOError as e: + bb.fatal("No package manifest file found. Did you build the image?\n%s" % e) + + # test context + tc = TestContext() + + # this is a dummy load of tests + # we are doing that to find compile errors in the tests themselves + # before booting the image + try: + loadTests(tc) + except Exception as e: + import traceback + bb.fatal("Loading tests failed:\n%s" % traceback.format_exc()) + + target.deploy() + + target.start() + try: + if export: + exportTests(d,tc) + else: + starttime = time.time() + result = runTests(tc) + stoptime = time.time() + if result.wasSuccessful(): + bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime)) + msg = "%s - OK - All required tests passed" % pn + skipped = len(result.skipped) + if skipped: + msg += " (skipped=%d)" % skipped + bb.plain(msg) + else: + raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn ) + finally: + target.stop() + +testimage_main[vardepsexclude] =+ "BB_ORIGENV" + + +def testsdk_main(d): + import unittest + import os + import glob + import oeqa.runtime + import oeqa.sdk + import time + import subprocess + from oeqa.oetest import loadTests, runTests + + pn = d.getVar("PN", True) + bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True)) + + # tests in TEST_SUITES become required tests + # they won't be skipped even if they aren't suitable. + # testslist is what we'll actually pass to the unittest loader + testslist = get_tests_list(d, "sdk") + testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"] + + sdktestdir = d.expand("${WORKDIR}/testimage-sdk/") + bb.utils.remove(sdktestdir, True) + bb.utils.mkdirhier(sdktestdir) + + tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh") + if not os.path.exists(tcname): + bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake meta-toolchain' .") + subprocess.call("cd %s; %s < 1: + bb.fatal("Error, multiple targets within the SDK found and we don't know which to test? 
%s" % str(targets)) + sdkenv = sdktestdir + "/tc/environment-setup-" + os.path.basename(targets[0]) + + class TestContext(object): + def __init__(self): + self.d = d + self.testslist = testslist + self.testsrequired = testsrequired + self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files") + self.sdktestdir = sdktestdir + self.sdkenv = sdkenv + self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split() + self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split() + manifest = os.path.join(d.getVar("SDK_MANIFEST", True)) + try: + with open(manifest) as f: + self.pkgmanifest = f.read() + except IOError as e: + bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e) + + # test context + tc = TestContext() + + # this is a dummy load of tests + # we are doing that to find compile errors in the tests themselves + # before booting the image + try: + loadTests(tc, "sdk") + except Exception as e: + import traceback + bb.fatal("Loading tests failed:\n%s" % traceback.format_exc()) + + try: + starttime = time.time() + result = runTests(tc, "sdk") + stoptime = time.time() + if result.wasSuccessful(): + bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime)) + msg = "%s - OK - All required tests passed" % pn + skipped = len(result.skipped) + if skipped: + msg += " (skipped=%d)" % skipped + bb.plain(msg) + else: + raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn ) + finally: + pass + bb.utils.remove(sdktestdir, True) + +testsdk_main[vardepsexclude] =+ "BB_ORIGENV" + diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass new file mode 100644 index 0000000000..92efbccddf --- /dev/null +++ b/meta/classes/texinfo.bbclass @@ -0,0 +1,15 @@ +# This class is inherited by recipes whose upstream packages invoke the +# texinfo utilities at build-time. Native and cross recipes are made to use the +# dummy scripts provided by texinfo-dummy-native, for improved performance. +# Target architecture recipes use the genuine Texinfo utilities. By default, +# they use the Texinfo utilities on the host system. If you want to use the +# Texinfo recipe shipped with yoco, you can remove texinfo-native from +# ASSUME_PROVIDED and makeinfo from SANITY_REQUIRED_UTILITIES. + +TEXDEP = "texinfo-native" +TEXDEP_class-native = "texinfo-dummy-native" +TEXDEP_class-cross = "texinfo-dummy-native" +DEPENDS_append = " ${TEXDEP}" +PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:" +PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:" + diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass new file mode 100644 index 0000000000..6984efd1be --- /dev/null +++ b/meta/classes/tinderclient.bbclass @@ -0,0 +1,368 @@ +def tinder_http_post(server, selector, content_type, body): + import httplib + # now post it + for i in range(0,5): + try: + h = httplib.HTTP(server) + h.putrequest('POST', selector) + h.putheader('content-type', content_type) + h.putheader('content-length', str(len(body))) + h.endheaders() + h.send(body) + errcode, errmsg, headers = h.getreply() + #print errcode, errmsg, headers + return (errcode,errmsg, headers, h.file) + except: + print "Error sending the report!" 
+ # try again + pass + + # return some garbage + return (-1, "unknown", "unknown", None) + +def tinder_form_data(bound, dict, log): + output = [] + # for each key in the dictionary + for name in dict: + assert dict[name] + output.append( "--" + bound ) + output.append( 'Content-Disposition: form-data; name="%s"' % name ) + output.append( "" ) + output.append( dict[name] ) + if log: + output.append( "--" + bound ) + output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' ) + output.append( '' ) + output.append( log ) + output.append( '--' + bound + '--' ) + output.append( '' ) + + return "\r\n".join(output) + +def tinder_time_string(): + """ + Return the time as GMT + """ + return "" + +def tinder_format_http_post(d,status,log): + """ + Format the Tinderbox HTTP post with the data needed + for the tinderbox to be happy. + """ + + import random + + # the variables we will need to send on this form post + variables = { + "tree" : d.getVar('TINDER_TREE', True), + "machine_name" : d.getVar('TINDER_MACHINE', True), + "os" : os.uname()[0], + "os_version" : os.uname()[2], + "compiler" : "gcc", + "clobber" : d.getVar('TINDER_CLOBBER', True) or "0", + "srcdate" : d.getVar('SRCDATE', True), + "PN" : d.getVar('PN', True), + "PV" : d.getVar('PV', True), + "PR" : d.getVar('PR', True), + "FILE" : d.getVar('FILE', True) or "N/A", + "TARGETARCH" : d.getVar('TARGET_ARCH', True), + "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown", + "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown", + "MACHINE" : d.getVar('MACHINE', True) or "Unknown", + "DISTRO" : d.getVar('DISTRO', True) or "Unknown", + "zecke-rocks" : "sure", + } + + # optionally add the status + if status: + variables["status"] = str(status) + + # try to load the machine id + # we only need on build_status.pl but sending it + # always does not hurt + try: + f = file(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r') + id = f.read() + variables['machine_id'] = id + except: + pass + + # the boundary we will need + boundary = "----------------------------------%d" % int(random.random()*1000000000000) + + # now format the body + body = tinder_form_data( boundary, variables, log ) + + return ("multipart/form-data; boundary=%s" % boundary),body + + +def tinder_build_start(d): + """ + Inform the tinderbox that a build is starting. We do this + by posting our name and tree to the build_start.pl script + on the server. + """ + + # get the body and type + content_type, body = tinder_format_http_post(d,None,None) + server = d.getVar('TINDER_HOST', True ) + url = d.getVar('TINDER_URL', True ) + + selector = url + "/xml/build_start.pl" + + #print "selector %s and url %s" % (selector, url) + + # now post it + errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body) + #print errcode, errmsg, headers + report = h_file.read() + + # now let us find the machine id that was assigned to us + search = " 0: + content_type, body = tinder_format_http_post(d,status,new_log[0:18000]) + errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body) + #print errcode, errmsg, headers + #print h.file.read() + new_log = new_log[18000:] + + +def tinder_print_info(d): + """ + Print the TinderBox Info + Including informations of the BaseSystem and the Tree + we use. 
+ """ + + # get the local vars + time = tinder_time_string() + ops = os.uname()[0] + version = os.uname()[2] + url = d.getVar( 'TINDER_URL' , True ) + tree = d.getVar( 'TINDER_TREE', True ) + branch = d.getVar( 'TINDER_BRANCH', True ) + srcdate = d.getVar( 'SRCDATE', True ) + machine = d.getVar( 'MACHINE', True ) + distro = d.getVar( 'DISTRO', True ) + bbfiles = d.getVar( 'BBFILES', True ) + tarch = d.getVar( 'TARGET_ARCH', True ) + fpu = d.getVar( 'TARGET_FPU', True ) + oerev = d.getVar( 'OE_REVISION', True ) or "unknown" + + # there is a bug with tipple quoted strings + # i will work around but will fix the original + # bug as well + output = [] + output.append("== Tinderbox Info" ) + output.append("Time: %(time)s" ) + output.append("OS: %(ops)s" ) + output.append("%(version)s" ) + output.append("Compiler: gcc" ) + output.append("Tinderbox Client: 0.1" ) + output.append("Tinderbox Client Last Modified: yesterday" ) + output.append("Tinderbox Protocol: 0.1" ) + output.append("URL: %(url)s" ) + output.append("Tree: %(tree)s" ) + output.append("Config:" ) + output.append("branch = '%(branch)s'" ) + output.append("TARGET_ARCH = '%(tarch)s'" ) + output.append("TARGET_FPU = '%(fpu)s'" ) + output.append("SRCDATE = '%(srcdate)s'" ) + output.append("MACHINE = '%(machine)s'" ) + output.append("DISTRO = '%(distro)s'" ) + output.append("BBFILES = '%(bbfiles)s'" ) + output.append("OEREV = '%(oerev)s'" ) + output.append("== End Tinderbox Client Info" ) + + # now create the real output + return "\n".join(output) % vars() + + +def tinder_print_env(): + """ + Print the environment variables of this build + """ + time_start = tinder_time_string() + time_end = tinder_time_string() + + # build the environment + env = "" + for var in os.environ: + env += "%s=%s\n" % (var, os.environ[var]) + + output = [] + output.append( "---> TINDERBOX RUNNING env %(time_start)s" ) + output.append( env ) + output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" ) + + return "\n".join(output) % vars() + +def tinder_tinder_start(d, event): + """ + PRINT the configuration of this build + """ + + time_start = tinder_time_string() + config = tinder_print_info(d) + #env = tinder_print_env() + time_end = tinder_time_string() + packages = " ".join( event.getPkgs() ) + + output = [] + output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" ) + output.append( config ) + #output.append( env ) + output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" ) + output.append( "---> TINDERBOX BUILDING '%(packages)s'" ) + output.append( "<--- TINDERBOX STARTING BUILD NOW" ) + + output.append( "" ) + + return "\n".join(output) % vars() + +def tinder_do_tinder_report(event): + """ + Report to the tinderbox: + On the BuildStart we will inform the box directly + On the other events we will write to the TINDER_LOG and + when the Task is finished we will send the report. + + The above is not yet fully implemented. Currently we send + information immediately. The caching/queuing needs to be + implemented. Also sending more or less information is not + implemented yet. + + We have two temporary files stored in the TMP directory. One file + contains the assigned machine id for the tinderclient. This id gets + assigned when we connect the box and start the build process the second + file is used to workaround an EventHandler limitation. If BitBake is ran + with the continue option we want the Build to fail even if we get the + BuildCompleted Event. 
In this case we have to look up the status and + send it instead of 100/success. + """ + import glob + + # variables + name = bb.event.getName(event) + log = "" + status = 1 + # Check what we need to do Build* shows we start or are done + if name == "BuildStarted": + tinder_build_start(event.data) + log = tinder_tinder_start(event.data,event) + + try: + # truncate the tinder log file + f = file(event.data.getVar('TINDER_LOG', True), 'w') + f.write("") + f.close() + except: + pass + + try: + # write a status to the file. This is needed for the -k option + # of BitBake + g = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w') + g.write("") + g.close() + except IOError: + pass + + # Append the Task-Log (compile,configure...) to the log file + # we will send to the server + if name == "TaskSucceeded" or name == "TaskFailed": + log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task)) + + if len(log_file) != 0: + to_file = event.data.getVar('TINDER_LOG', True) + log += "".join(open(log_file[0], 'r').readlines()) + + # set the right 'HEADER'/Summary for the TinderBox + if name == "TaskStarted": + log += "---> TINDERBOX Task %s started\n" % event.task + elif name == "TaskSucceeded": + log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task + elif name == "TaskFailed": + log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task + elif name == "PkgStarted": + log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True) + elif name == "PkgSucceeded": + log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True) + elif name == "PkgFailed": + if not event.data.getVar('TINDER_AUTOBUILD', True) == "0": + build.exec_task('do_clean', event.data) + log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True) + status = 200 + # remember the failure for the -k case + h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w') + h.write("200") + elif name == "BuildCompleted": + log += "Build Completed\n" + status = 100 + # Check if we have a old status... 
+ try: + h = file(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r') + status = int(h.read()) + except: + pass + + elif name == "MultipleProviders": + log += "---> TINDERBOX Multiple Providers\n" + log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates()) + log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem() + log += "is runtime: %d\n" % event.isRuntime() + log += "<--- TINDERBOX Multiple Providers\n" + elif name == "NoProvider": + log += "Error: No Provider for: %s\n" % event.getItem() + log += "Error:Was Runtime: %d\n" % event.isRuntime() + status = 200 + # remember the failure for the -k case + h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w') + h.write("200") + + # now post the log + if len(log) == 0: + return + + # for now we will use the http post method as it is the only one + log_post_method = tinder_send_http + log_post_method(event.data, status, log) + + +# we want to be an event handler +addhandler tinderclient_eventhandler +python tinderclient_eventhandler() { + if e.data is None or bb.event.getName(e) == "MsgNote": + return + + do_tinder_report = e.data.getVar('TINDER_REPORT', True) + if do_tinder_report and do_tinder_report == "1": + tinder_do_tinder_report(e) + + return +} diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass new file mode 100644 index 0000000000..a7dd0aa854 --- /dev/null +++ b/meta/classes/toaster.bbclass @@ -0,0 +1,343 @@ +# +# Toaster helper class +# +# Copyright (C) 2013 Intel Corporation +# +# Released under the MIT license (see COPYING.MIT) +# +# This bbclass is designed to extract data used by OE-Core during the build process, +# for recording in the Toaster system. +# The data access is synchronous, preserving the build data integrity across +# different builds. +# +# The data is transferred through the event system, using the MetadataEvent objects. +# +# The model is to enable the datadump functions as postfuncs, and have the dump +# executed after the real taskfunc has been executed. This prevents task signature changing +# is toaster is enabled or not. Build performance is not affected if Toaster is not enabled. 
+# +# To enable, use INHERIT in local.conf: +# +# INHERIT += "toaster" +# +# +# +# + +# Find and dump layer info when we got the layers parsed + + + +python toaster_layerinfo_dumpdata() { + import subprocess + + def _get_git_branch(layer_path): + branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0] + branch = branch.replace('refs/heads/', '').rstrip() + return branch + + def _get_git_revision(layer_path): + revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip() + return revision + + def _get_url_map_name(layer_name): + """ Some layers have a different name on openembedded.org site, + this method returns the correct name to use in the URL + """ + + url_name = layer_name + url_mapping = {'meta': 'openembedded-core'} + + for key in url_mapping.keys(): + if key == layer_name: + url_name = url_mapping[key] + + return url_name + + def _get_layer_version_information(layer_path): + + layer_version_info = {} + layer_version_info['branch'] = _get_git_branch(layer_path) + layer_version_info['commit'] = _get_git_revision(layer_path) + layer_version_info['priority'] = 0 + + return layer_version_info + + + def _get_layer_dict(layer_path): + + layer_info = {} + layer_name = layer_path.split('/')[-1] + layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/' + layer_url_name = _get_url_map_name(layer_name) + + layer_info['name'] = layer_url_name + layer_info['local_path'] = layer_path + layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name) + layer_info['version'] = _get_layer_version_information(layer_path) + + return layer_info + + + bblayers = e.data.getVar("BBLAYERS", True) + + llayerinfo = {} + + for layer in { l for l in bblayers.strip().split(" ") if len(l) }: + llayerinfo[layer] = _get_layer_dict(layer) + + + bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data) +} + +# Dump package file info data + +def _toaster_load_pkgdatafile(dirpath, filepath): + import json + import re + pkgdata = {} + with open(os.path.join(dirpath, filepath), "r") as fin: + for line in fin: + try: + kn, kv = line.strip().split(": ", 1) + m = re.match(r"^PKG_([^A-Z:]*)", kn) + if m: + pkgdata['OPKGN'] = m.group(1) + kn = "_".join([x for x in kn.split("_") if x.isupper()]) + pkgdata[kn] = kv.strip() + if kn == 'FILES_INFO': + pkgdata[kn] = json.loads(kv) + + except ValueError: + pass # ignore lines without valid key: value pairs + return pkgdata + + +python toaster_package_dumpdata() { + """ + Dumps the data created by emit_pkgdata + """ + # replicate variables from the package.bbclass + + packages = d.getVar('PACKAGES', True) + pkgdest = d.getVar('PKGDEST', True) + + pkgdatadir = d.getVar('PKGDESTWORK', True) + + # scan and send data for each package + + lpkgdata = {} + for pkg in packages.split(): + + lpkgdata = _toaster_load_pkgdatafile(pkgdatadir + "/runtime/", pkg) + + # Fire an event containing the pkg data + bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d) +} + +# 2. Dump output image files information + +python toaster_image_dumpdata() { + """ + Image filename for output images is not standardized. + image_types.bbclass will spell out IMAGE_CMD_xxx variables that actually + have hardcoded ways to create image file names in them. + So we look for files starting with the set name. 
+ """ + + deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE', True); + image_name = d.getVar('IMAGE_NAME', True); + + image_info_data = {} + + for dirpath, dirnames, filenames in os.walk(deploy_dir_image): + for fn in filenames: + if fn.startswith(image_name): + image_output = os.path.join(dirpath, fn) + image_info_data[image_output] = os.stat(image_output).st_size + + bb.event.fire(bb.event.MetadataEvent("ImageFileSize",image_info_data), d) +} + + + +# collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data + +python toaster_collect_task_stats() { + import bb.build + import bb.event + import bb.data + import bb.utils + import os + + if not e.data.getVar('BUILDSTATS_BASE', True): + return # if we don't have buildstats, we cannot collect stats + + def _append_read_list(v): + lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True) + + with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "a") as fout: + bn = get_bn(e) + bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn) + taskdir = os.path.join(bsdir, e.data.expand("${PF}")) + fout.write("%s:%s:%s:%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}"))) + + bb.utils.unlockfile(lock) + + def _read_stats(filename): + cpu_usage = 0 + disk_io = 0 + startio = '0' + endio = '0' + started = '0' + ended = '0' + pn = '' + taskname = '' + statinfo = {} + + with open(filename, 'r') as task_bs: + for line in task_bs.readlines(): + k,v = line.strip().split(": ", 1) + statinfo[k] = v + + if "CPU usage" in statinfo: + cpu_usage = str(statinfo["CPU usage"]).strip('% \n\r') + + if "EndTimeIO" in statinfo: + endio = str(statinfo["EndTimeIO"]).strip('% \n\r') + + if "StartTimeIO" in statinfo: + startio = str(statinfo["StartTimeIO"]).strip('% \n\r') + + if "Started" in statinfo: + started = str(statinfo["Started"]).strip('% \n\r') + + if "Ended" in statinfo: + ended = str(statinfo["Ended"]).strip('% \n\r') + + disk_io = int(endio) - int(startio) + + elapsed_time = float(ended) - float(started) + + cpu_usage = float(cpu_usage) + + return {'cpu_usage': cpu_usage, 'disk_io': disk_io, 'elapsed_time': elapsed_time} + + + if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)): + _append_read_list(e) + pass + + + if isinstance(e, bb.event.BuildCompleted) and os.path.exists(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")): + events = [] + with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "r") as fin: + for line in fin: + (taskfile, taskname, filename, recipename) = line.strip().split(":") + events.append((taskfile, taskname, _read_stats(filename), recipename)) + bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data) + os.unlink(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")) +} + +# dump relevant build history data as an event when the build is completed + +python toaster_buildhistory_dump() { + import re + BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory") + BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR) + pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + + + # scan the build targets for this build + images = {} + allpkgs = {} + files = {} + for target in e._pkgs: + installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target)) + if os.path.exists(installed_img_path): + images[target] = {} + 
files[target] = {} + files[target]['dirs'] = [] + files[target]['syms'] = [] + files[target]['files'] = [] + with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin: + for line in fin: + line = line.rstrip(";") + psize, px = line.split("\t") + punit, pname = px.split(" ") + # this size is "installed-size" as it measures how much space it takes on disk + images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []} + + with open("%s/depends.dot" % installed_img_path, "r") as fin: + p = re.compile(r' -> ') + dot = re.compile(r'.*style=dotted') + for line in fin: + line = line.rstrip(';') + linesplit = p.split(line) + if len(linesplit) == 2: + pname = linesplit[0].rstrip('"').strip('"') + dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"') + deptype = "depends" + if dot.match(line): + deptype = "recommends" + if not pname in images[target]: + images[target][pname] = {'size': 0, 'depends' : []} + if not dependsname in images[target]: + images[target][dependsname] = {'size': 0, 'depends' : []} + images[target][pname]['depends'].append((dependsname, deptype)) + + with open("%s/files-in-image.txt" % installed_img_path, "r") as fin: + for line in fin: + lc = [ x for x in line.strip().split(" ") if len(x) > 0 ] + if lc[0].startswith("l"): + files[target]['syms'].append(lc) + elif lc[0].startswith("d"): + files[target]['dirs'].append(lc) + else: + files[target]['files'].append(lc) + + for pname in images[target]: + if not pname in allpkgs: + try: + pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname) + except IOError as err: + if err.errno == 2: + # We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime + continue + else: + raise + allpkgs[pname] = pkgdata + + + data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files } + + bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data) + +} + +# dump information related to license manifest path + +python toaster_licensemanifest_dump() { + deploy_dir = d.getVar('DEPLOY_DIR', True); + image_name = d.getVar('IMAGE_NAME', True); + + data = { 'deploy_dir' : deploy_dir, 'image_name' : image_name } + + bb.event.fire(bb.event.MetadataEvent("LicenseManifestPath", data), d) +} + +# set event handlers +addhandler toaster_layerinfo_dumpdata +toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted" + +addhandler toaster_collect_task_stats +toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed" + +addhandler toaster_buildhistory_dump +toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted" +do_package[postfuncs] += "toaster_package_dumpdata " +do_package[vardepsexclude] += "toaster_package_dumpdata " + +do_rootfs[postfuncs] += "toaster_image_dumpdata " +do_rootfs[postfuncs] += "toaster_licensemanifest_dump " +do_rootfs[vardepsexclude] += "toaster_image_dumpdata toaster_licensemanifest_dump" diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass new file mode 100644 index 0000000000..75464d1317 --- /dev/null +++ b/meta/classes/toolchain-scripts.bbclass @@ -0,0 +1,138 @@ +inherit siteinfo kernel-arch + +# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it +# doesn't always match our expectations... 
but we default to the stock value +REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}" + +# This function creates an environment-setup-script for use in a deployable SDK +toolchain_create_sdk_env_script () { + # Create environment setup script + libdir=${4:-${libdir}} + sysroot=${3:-${SDKTARGETSYSROOT}} + multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}} + script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys} + rm -f $script + touch $script + echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script + EXTRAPATH="" + for i in ${CANADIANEXTRAOS}; do + EXTRAPATH="$EXTRAPATH:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_ARCH}${TARGET_VENDOR}-$i" + done + echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}'$EXTRAPATH':$PATH' >> $script + echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script + echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script + echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script + echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script + echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script + echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script + echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script + + toolchain_shared_env_script +} + +# This function creates an environment-setup-script in the TMPDIR which enables +# a OE-core IDE to integrate with the build tree +toolchain_create_tree_env_script () { + script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS} + rm -f $script + touch $script + echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script + echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script + echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script + echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script + echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script + echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script + echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script + echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script + + toolchain_shared_env_script +} + +toolchain_shared_env_script () { + echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script + echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script + echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script + echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script + echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script + echo 'export GDB=${TARGET_PREFIX}gdb' >> $script + echo 'export STRIP=${TARGET_PREFIX}strip' >> $script + echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script + echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script + echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script + echo 'export AR=${TARGET_PREFIX}ar' >> $script + echo 'export NM=${TARGET_PREFIX}nm' >> $script + echo 'export M4=m4' >> $script + echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script + echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script + echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script + echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script + echo 'export 
LDFLAGS="${TARGET_LDFLAGS}"' >> $script + echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script + echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script + echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script + echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script + echo 'export ARCH=${ARCH}' >> $script + echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script + + cat >> $script <> $siteconfig + done + + #get cached site config + for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do + if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then + cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig + fi + done +} +# The immediate expansion above can result in unwanted path dependencies here +toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE" + +#This function create a version information file +toolchain_create_sdk_version () { + local versionfile=$1 + rm -f $versionfile + touch $versionfile + echo 'Distro: ${DISTRO}' >> $versionfile + echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile + echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile + echo 'Timestamp: ${DATETIME}' >> $versionfile +} +toolchain_create_sdk_version[vardepsexclude] = "DATETIME" + +python __anonymous () { + deps = "" + for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split(): + deps += " %s:do_populate_sysroot" % dep + for variant in (d.getVar('MULTILIB_VARIANTS', True) or "").split(): + deps += " %s-%s:do_populate_sysroot" % (variant, dep) + d.appendVarFlag('do_configure', 'depends', deps) +} diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass new file mode 100644 index 0000000000..72da932232 --- /dev/null +++ b/meta/classes/typecheck.bbclass @@ -0,0 +1,12 @@ +# Check types of bitbake configuration variables +# +# See oe.types for details. + +python check_types() { + import oe.types + for key in e.data.keys(): + if e.data.getVarFlag(key, "type"): + oe.data.typed_value(key, e.data) +} +addhandler check_types +check_types[eventmask] = "bb.event.ConfigParsed" diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass new file mode 100644 index 0000000000..8ac1b71bc2 --- /dev/null +++ b/meta/classes/uboot-config.bbclass @@ -0,0 +1,61 @@ +# Handle U-Boot config for a machine +# +# The format to specify it, in the machine, is: +# +# UBOOT_CONFIG ??= +# UBOOT_CONFIG[foo] = "config,images" +# +# or +# +# UBOOT_MACHINE = "config" +# +# Copyright 2013, 2014 (C) O.S. Systems Software LTDA. + +python () { + ubootmachine = d.getVar("UBOOT_MACHINE", True) + ubootconfigflags = d.getVarFlags('UBOOT_CONFIG') + # The "doc" varflag is special, we don't want to see it here + ubootconfigflags.pop('doc', None) + + if not ubootmachine and not ubootconfigflags: + PN = d.getVar("PN", True) + FILE = os.path.basename(d.getVar("FILE", True)) + bb.debug(1, "To build %s, see %s for instructions on \ + setting up your machine config" % (PN, FILE)) + raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." 
% d.getVar("MACHINE", True)) + + if ubootmachine and ubootconfigflags: + raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.") + + if not ubootconfigflags: + return + + ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split() + if len(ubootconfig) > 1: + raise bb.parse.SkipPackage('You can only have a single default for UBOOT_CONFIG.') + elif len(ubootconfig) == 0: + raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.') + ubootconfig = ubootconfig[0] + + for f, v in ubootconfigflags.items(): + if f == 'defaultval': + continue + + items = v.split(',') + if items[0] and len(items) > 2: + raise bb.parse.SkipPackage('Only config,images can be specified!') + + if ubootconfig == f: + bb.debug(1, "Setting UBOOT_MACHINE to %s." % items[0]) + d.setVar('UBOOT_MACHINE', items[0]) + + # IMAGE_FSTYPES appending + if len(items) > 1 and items[1]: + bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1]) + d.appendVar('IMAGE_FSTYPES', ' ' + items[1]) + + # Go out as we found a match! + break + else: + raise bb.parse.SkipPackage("UBOOT_CONFIG %s is not supported" % ubootconfig) +} diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass new file mode 100644 index 0000000000..51391dbc4a --- /dev/null +++ b/meta/classes/uninative.bbclass @@ -0,0 +1,44 @@ +NATIVELSBSTRING = "universal" + +UNINATIVE_LOADER = "${STAGING_DIR_NATIVE}/lib/ld-linux-x86-64.so.2" + +addhandler uninative_eventhandler +uninative_eventhandler[eventmask] = "bb.event.BuildStarted" + +python uninative_eventhandler() { + loader = e.data.getVar("UNINATIVE_LOADER", True) + if not os.path.exists(loader): + import subprocess + cmd = e.data.expand("mkdir -p ${STAGING_DIR}; cd ${STAGING_DIR}; tar -xjf ${COREBASE}/${BUILD_ARCH}-nativesdk-libc.tar.bz2; ${STAGING_DIR}/relocate_sdk.py ${STAGING_DIR_NATIVE} ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_BINDIR_NATIVE}/patchelf-uninative") + #bb.warn("nativesdk lib extraction: " + cmd) + subprocess.check_call(cmd, shell=True) +} + +SSTATEPOSTUNPACKFUNCS_append = " uninative_changeinterp" + +python uninative_changeinterp () { + import subprocess + import stat + import oe.qa + + if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)): + return + + sstateinst = d.getVar('SSTATE_INSTDIR', True) + for walkroot, dirs, files in os.walk(sstateinst): + for file in files: + f = os.path.join(walkroot, file) + if os.path.islink(f): + continue + s = os.stat(f) + if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)): + continue + elf = oe.qa.ELFFile(f) + try: + elf.open() + except: + continue + + #bb.warn("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f)) + subprocess.call("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f), shell=True) +} diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass new file mode 100644 index 0000000000..9f2c250d03 --- /dev/null +++ b/meta/classes/update-alternatives.bbclass @@ -0,0 +1,267 @@ +# This class is used to help the alternatives system which is useful when +# multiple sources provide same command. You can use update-alternatives +# command directly in your recipe, but in most cases this class simplifies +# that job. 
+# +# To use this class a number of variables should be defined: +# +# List all of the alternatives needed by a package: +# ALTERNATIVE_ = "name1 name2 name3 ..." +# +# i.e. ALTERNATIVE_busybox = "sh sed test bracket" +# +# The pathname of the link +# ALTERNATIVE_LINK_NAME[name] = "target" +# +# This is the name of the binary once it's been installed onto the runtime. +# This name is global to all split packages in this recipe, and should match +# other recipes with the same functionality. +# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/[" +# +# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name +# +# The default link to create for all targets +# ALTERNATIVE_TARGET = "target" +# +# This is useful in a multicall binary case +# i.e. ALTERNATIVE_TARGET = "/bin/busybox" +# +# A non-default link to create for a target +# ALTERNATIVE_TARGET[name] = "target" +# +# This is the name of the binary as it's been install by do_install +# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash" +# +# A package specific link for a target +# ALTERNATIVE_TARGET_[name] = "target" +# +# This is useful when a recipe provides multiple alternatives for the +# same item. +# +# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value +# from ALTERNATIVE_LINK_NAME. +# +# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same, +# ALTERNATIVE_TARGET will have '.{BPN}' appended to it. If the file +# referenced has not been renamed, it will also be renamed. (This avoids +# the need to rename alternative files in the do_install step, but still +# supports it if necessary for some reason.) +# +# The default priority for any alternatives +# ALTERNATIVE_PRIORITY = "priority" +# +# i.e. default is ALTERNATIVE_PRIORITY = "10" +# +# The non-default priority for a specific target +# ALTERNATIVE_PRIORITY[name] = "priority" +# +# The package priority for a specific target +# ALTERNATIVE_PRIORITY_[name] = "priority" + +ALTERNATIVE_PRIORITY = "10" + +# We need special processing for vardeps because it can not work on +# modified flag values. So we agregate the flags into a new variable +# and include that vairable in the set. +UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY" + +def gen_updatealternativesvardeps(d): + pkgs = (d.getVar("PACKAGES", True) or "").split() + vars = (d.getVar("UPDALTVARS", True) or "").split() + + # First compute them for non_pkg versions + for v in vars: + for flag in (d.getVarFlags(v) or {}): + if flag == "doc" or flag == "vardeps" or flag == "vardepsexp": + continue + d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False))) + + for p in pkgs: + for v in vars: + for flag in (d.getVarFlags("%s_%s" % (v,p)) or {}): + if flag == "doc" or flag == "vardeps" or flag == "vardepsexp": + continue + d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False))) + +def ua_extend_depends(d): + if not 'virtual/update-alternatives' in d.getVar('PROVIDES', True): + d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives') + +python __anonymous() { + # Update Alternatives only works on target packages... 
+ if bb.data.inherits_class('native', d) or \ + bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \ + bb.data.inherits_class('cross-canadian', d): + return + + # compute special vardeps + gen_updatealternativesvardeps(d) + + # extend the depends to include virtual/update-alternatives + ua_extend_depends(d) +} + +def gen_updatealternativesvars(d): + ret = [] + pkgs = (d.getVar("PACKAGES", True) or "").split() + vars = (d.getVar("UPDALTVARS", True) or "").split() + + for v in vars: + ret.append(v + "_VARDEPS") + + for p in pkgs: + for v in vars: + ret.append(v + "_" + p) + ret.append(v + "_VARDEPS_" + p) + return " ".join(ret) + +# Now the new stuff, we use a custom function to generate the right values +populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}" + +# We need to do the rename after the image creation step, but before +# the split and strip steps.. packagecopy seems to be the earliest reasonable +# place. +python perform_packagecopy_append () { + # Check for deprecated usage... + pn = d.getVar('BPN', True) + if d.getVar('ALTERNATIVE_LINKS', True) != None: + bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn) + + # Do actual update alternatives processing + pkgdest = d.getVar('PKGD', True) + for pkg in (d.getVar('PACKAGES', True) or "").split(): + # If the src == dest, we know we need to rename the dest by appending ${BPN} + link_rename = {} + for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split(): + alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True) + if not alt_link: + alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name) + d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link) + + alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True) + alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link + # Sometimes alt_target is specified as relative to the link name. + alt_target = os.path.join(os.path.dirname(alt_link), alt_target) + + # If the link and target are the same name, we need to rename the target. + if alt_link == alt_target: + src = '%s/%s' % (pkgdest, alt_target) + alt_target_rename = '%s.%s' % (alt_target, pn) + dest = '%s/%s' % (pkgdest, alt_target_rename) + if os.path.lexists(dest): + bb.note('%s: Already renamed: %s' % (pn, alt_target_rename)) + elif os.path.lexists(src): + if os.path.islink(src): + # Delay rename of links + link_rename[alt_target] = alt_target_rename + else: + bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename)) + os.rename(src, dest) + else: + bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." 
% (pn, alt_target, alt_target_rename)) + continue + d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename) + + # Process delayed link names + # Do these after other renames so we can correct broken links + for alt_target in link_rename: + src = '%s/%s' % (pkgdest, alt_target) + dest = '%s/%s' % (pkgdest, link_rename[alt_target]) + link = os.readlink(src) + link_target = oe.path.realpath(src, pkgdest, True) + + if os.path.lexists(link_target): + # Ok, the link_target exists, we can rename + bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target])) + os.rename(src, dest) + else: + # Try to resolve the broken link to link.${BPN} + link_maybe = '%s.%s' % (os.readlink(src), pn) + if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)): + # Ok, the renamed link target exists.. create a new link, and remove the original + bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe)) + os.symlink(link_maybe, dest) + os.unlink(src) + else: + bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target)) +} + +PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives " + +python populate_packages_updatealternatives () { + pn = d.getVar('BPN', True) + + # Do actual update alternatives processing + pkgdest = d.getVar('PKGD', True) + for pkg in (d.getVar('PACKAGES', True) or "").split(): + # Create post install/removal scripts + alt_setup_links = "" + alt_remove_links = "" + for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split(): + alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True) + alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True) + alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link + # Sometimes alt_target is specified as relative to the link name. + alt_target = os.path.join(os.path.dirname(alt_link), alt_target) + + alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True) + alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True) + + # This shouldn't trigger, as it should have been resolved earlier! + if alt_link == alt_target: + bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target)) + alt_target = '%s.%s' % (alt_target, pn) + + if not os.path.lexists('%s/%s' % (pkgdest, alt_target)): + bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target)) + continue + + # Default to generate shell script.. eventually we may want to change this... 
+ alt_target = os.path.normpath(alt_target) + + alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority) + alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target) + + if alt_setup_links: + # RDEPENDS setup + provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True) + if provider: + #bb.note('adding runtime requirement for update-alternatives for %s' % pkg) + d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX') + provider) + + bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg) + bb.note('%s' % alt_setup_links) + postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n' + postinst += alt_setup_links + d.setVar('pkg_postinst_%s' % pkg, postinst) + + bb.note('%s' % alt_remove_links) + postrm = d.getVar('pkg_postrm_%s' % pkg, True) or '#!/bin/sh\n' + postrm += alt_remove_links + d.setVar('pkg_postrm_%s' % pkg, postrm) +} + +python package_do_filedeps_append () { + pn = d.getVar('BPN', True) + pkgdest = d.getVar('PKGDEST', True) + + for pkg in packages.split(): + for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split(): + alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True) + alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True) + alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link + + if alt_link == alt_target: + bb.warn('alt_link == alt_target: %s == %s' % (alt_link, alt_target)) + alt_target = '%s.%s' % (alt_target, pn) + + if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)): + continue + + # Add file provide + trans_target = oe.package.file_translate(alt_target) + d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link) + if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""): + d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target) +} + diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass new file mode 100644 index 0000000000..bc1aa7dad6 --- /dev/null +++ b/meta/classes/update-rc.d.bbclass @@ -0,0 +1,135 @@ +UPDATERCPN ?= "${PN}" + +DEPENDS_append = " update-rc.d-native" +VIRTUAL-RUNTIME_initscripts ?= "initscripts" +DEPENDS_append_class-target = " ${VIRTUAL-RUNTIME_initscripts}" +UPDATERCD = "update-rc.d" +UPDATERCD_class-cross = "" +UPDATERCD_class-native = "" +UPDATERCD_class-nativesdk = "" + +RRECOMMENDS_${UPDATERCPN}_append = " ${UPDATERCD}" + +INITSCRIPT_PARAMS ?= "defaults" + +INIT_D_DIR = "${sysconfdir}/init.d" + +updatercd_preinst() { +if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then + ${INIT_D_DIR}/${INITSCRIPT_NAME} stop +fi +if type update-rc.d >/dev/null 2>/dev/null; then + if [ -n "$D" ]; then + OPT="-f -r $D" + else + OPT="-f" + fi + update-rc.d $OPT ${INITSCRIPT_NAME} remove +fi +} + +updatercd_postinst() { +if type update-rc.d >/dev/null 2>/dev/null; then + if [ -n "$D" ]; then + OPT="-r $D" + else + OPT="-s" + fi + update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS} +fi +} + +updatercd_prerm() { +if [ -z "$D" ]; then + ${INIT_D_DIR}/${INITSCRIPT_NAME} stop +fi +} + +updatercd_postrm() { +if type update-rc.d >/dev/null 2>/dev/null; then + if [ -n "$D" ]; then + OPT="-r $D" + else + OPT="" + fi + update-rc.d $OPT ${INITSCRIPT_NAME} remove +fi +} + + +def update_rc_after_parse(d): + if d.getVar('INITSCRIPT_PACKAGES') == None: + if d.getVar('INITSCRIPT_NAME') == None: + 
raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE')) + if d.getVar('INITSCRIPT_PARAMS') == None: + raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE')) + +python __anonymous() { + update_rc_after_parse(d) +} + +PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd " +PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd " + +populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst" +populate_packages_updatercd[vardepsexclude] += "OVERRIDES" + +python populate_packages_updatercd () { + def update_rcd_auto_depend(pkg): + import subprocess + import os + path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}") + if not os.path.exists(path): + return + statement = "grep -q -w '/etc/init.d/functions' %s" % path + if subprocess.call(statement, shell=True) == 0: + mlprefix = d.getVar('MLPREFIX', True) or "" + d.appendVar('RDEPENDS_' + pkg, ' %sinitscripts-functions' % (mlprefix)) + + def update_rcd_package(pkg): + bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg) + + localdata = bb.data.createCopy(d) + overrides = localdata.getVar("OVERRIDES", True) + localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides)) + bb.data.update_data(localdata) + + update_rcd_auto_depend(pkg) + + preinst = d.getVar('pkg_preinst_%s' % pkg, True) + if not preinst: + preinst = '#!/bin/sh\n' + preinst += localdata.getVar('updatercd_preinst', True) + d.setVar('pkg_preinst_%s' % pkg, preinst) + + postinst = d.getVar('pkg_postinst_%s' % pkg, True) + if not postinst: + postinst = '#!/bin/sh\n' + postinst += localdata.getVar('updatercd_postinst', True) + d.setVar('pkg_postinst_%s' % pkg, postinst) + + prerm = d.getVar('pkg_prerm_%s' % pkg, True) + if not prerm: + prerm = '#!/bin/sh\n' + prerm += localdata.getVar('updatercd_prerm', True) + d.setVar('pkg_prerm_%s' % pkg, prerm) + + postrm = d.getVar('pkg_postrm_%s' % pkg, True) + if not postrm: + postrm = '#!/bin/sh\n' + postrm += localdata.getVar('updatercd_postrm', True) + d.setVar('pkg_postrm_%s' % pkg, postrm) + + # Check that this class isn't being inhibited (generally, by + # systemd.bbclass) before doing any work. + if bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) or \ + not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True): + pkgs = d.getVar('INITSCRIPT_PACKAGES', True) + if pkgs == None: + pkgs = d.getVar('UPDATERCPN', True) + packages = (d.getVar('PACKAGES', True) or "").split() + if not pkgs in packages and packages != []: + pkgs = packages[0] + for pkg in pkgs.split(): + update_rcd_package(pkg) +} diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass new file mode 100644 index 0000000000..421a70a6ab --- /dev/null +++ b/meta/classes/useradd-staticids.bbclass @@ -0,0 +1,276 @@ +# In order to support a deterministic set of 'dynamic' users/groups, +# we need a function to reformat the params based on a static file +def update_useradd_static_config(d): + import argparse + import re + + class myArgumentParser( argparse.ArgumentParser ): + def _print_message(self, message, file=None): + bb.warn("%s - %s: %s" % (d.getVar('PN', True), pkg, message)) + + # This should never be called... 
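For the update-rc.d.bbclass hunk above, a minimal hedged sketch of the recipe side, with a hypothetical init script name; INITSCRIPT_PARAMS takes whatever arguments update-rc.d itself accepts:

    inherit update-rc.d
    INITSCRIPT_NAME = "my-daemon"
    INITSCRIPT_PARAMS = "start 30 2 3 4 5 . stop 70 0 1 6 ."

populate_packages_updatercd then wraps the updatercd_preinst/postinst/prerm/postrm snippets around these values so the script is registered either at rootfs construction time or on the target at package install time.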
+ def exit(self, status=0, message=None): + message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN', True), pkg)) + error(message) + + def error(self, message): + raise bb.build.FuncFailed(message) + + # We parse and rewrite the useradd components + def rewrite_useradd(params): + # The following comes from --help on useradd from shadow + parser = myArgumentParser(prog='useradd') + parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account") + parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account") + parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account") + parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true") + parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account") + parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account") + parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account") + parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account") + parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory") + parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults") + parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true") + parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_true") + parser.add_argument("-M", "--no-create-home", help="do not create the user's home directory", action="store_true") + parser.add_argument("-N", "--no-user-group", help="do not create a group with the same name as the user", action="store_true") + parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true") + parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account") + parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into") + parser.add_argument("-r", "--system", help="create a system account", action="store_true") + parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account") + parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account") + parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_true") + parser.add_argument("LOGIN", help="Login name of the new user") + + # Return a list of configuration files based on either the default + # files/passwd or the contents of USERADD_UID_TABLES + # paths are resulved via BBPATH + def get_passwd_list(d): + str = "" + bbpath = d.getVar('BBPATH', True) + passwd_tables = d.getVar('USERADD_UID_TABLES', True) + if not passwd_tables: + passwd_tables = 'files/passwd' + for conf_file in passwd_tables.split(): + str += " %s" % bb.utils.which(bbpath, conf_file) + return str + + newparams = [] + for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params): + param = param.strip() + if not param: + continue + try: + uaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param)) + except: + raise bb.build.FuncFailed("%s: Unable to 
parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param)) + + # files/passwd or the contents of USERADD_UID_TABLES + # Use the standard passwd layout: + # username:password:user_id:group_id:comment:home_directory:login_shell + # (we want to process in reverse order, as 'last found' in the list wins) + # + # If a field is left blank, the original value will be used. The 'username' + # field is required. + # + # Note: we ignore the password field, as including even the hashed password + # in the useradd command may introduce a security hole. It's assumed that + # all new users get the default ('*' which prevents login) until the user is + # specifically configured by the system admin. + for conf in get_passwd_list(d).split()[::-1]: + if os.path.exists(conf): + f = open(conf, "r") + for line in f: + if line.startswith('#'): + continue + field = line.rstrip().split(":") + if field[0] == uaargs.LOGIN: + if uaargs.uid and field[2] and (uaargs.uid != field[2]): + bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2])) + uaargs.uid = [field[2], uaargs.uid][not field[2]] + + # Determine the possible groupname + # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname + # + # By default the system has creation of the matching groups enabled + # So if the implicit username-group creation is on, then the implicit groupname (LOGIN) + # is used, and we disable the user_group option. + # + uaargs.groupname = [uaargs.gid, uaargs.LOGIN][not uaargs.gid or uaargs.user_group] + uaargs.groupid = [uaargs.gid, uaargs.groupname][not uaargs.gid] + uaargs.groupid = [field[3], uaargs.groupid][not field[3]] + + if not uaargs.gid or uaargs.gid != uaargs.groupid: + if (uaargs.groupid and uaargs.groupid.isdigit()) and (uaargs.groupname and uaargs.groupname.isdigit()) and (uaargs.groupid != uaargs.groupname): + # We want to add a group, but we don't know it's name... so we can't add the group... + # We have to assume the group has previously been added or we'll fail on the adduser... + # Note: specifying the actual gid is very rare in OE, usually the group name is specified. + bb.warn("%s: Changing gid for login %s from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupname, uaargs.gid)) + elif (uaargs.groupid and not uaargs.groupid.isdigit()) and uaargs.groupid == uaargs.groupname: + # We don't have a number, so we have to add a name + bb.debug(1, "Adding group %s!" % (uaargs.groupname)) + uaargs.gid = uaargs.groupid + uaargs.user_group = False + groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True) + newgroup = "%s %s" % (['', ' --system'][uaargs.system], uaargs.groupname) + if groupadd: + d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup)) + else: + d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup) + elif uaargs.groupname and (uaargs.groupid and uaargs.groupid.isdigit()): + # We have a group name and a group number to assign it to + bb.debug(1, "Adding group %s gid (%s)!" 
% (uaargs.groupname, uaargs.groupid)) + uaargs.gid = uaargs.groupid + uaargs.user_group = False + groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True) + newgroup = "-g %s %s" % (uaargs.gid, uaargs.groupname) + if groupadd: + d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup)) + else: + d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup) + + uaargs.comment = ["'%s'" % field[4], uaargs.comment][not field[4]] + uaargs.home_dir = [field[5], uaargs.home_dir][not field[5]] + uaargs.shell = [field[6], uaargs.shell][not field[6]] + break + + # Should be an error if a specific option is set... + if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not ((uaargs.uid and uaargs.uid.isdigit()) and uaargs.gid): + #bb.error("Skipping recipe %s, package %s which adds username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN)) + raise bb.build.FuncFailed("%s - %s: Username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN)) + + # Reconstruct the args... + newparam = ['', ' --defaults'][uaargs.defaults] + newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None] + newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None] + newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None] + newparam += ['', ' --expiredata %s' % uaargs.expiredate][uaargs.expiredate != None] + newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None] + newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None] + newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None] + newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None] + newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None] + newparam += ['', ' --no-log-init'][uaargs.no_log_init] + newparam += ['', ' --create-home'][uaargs.create_home] + newparam += ['', ' --no-create-home'][uaargs.no_create_home] + newparam += ['', ' --no-user-group'][uaargs.no_user_group] + newparam += ['', ' --non-unique'][uaargs.non_unique] + newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None] + newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None] + newparam += ['', ' --system'][uaargs.system] + newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None] + newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None] + newparam += ['', ' --user-group'][uaargs.user_group] + newparam += ' %s' % uaargs.LOGIN + + newparams.append(newparam) + + return " ;".join(newparams).strip() + + # We parse and rewrite the groupadd components + def rewrite_groupadd(params): + # The following comes from --help on groupadd from shadow + parser = myArgumentParser(prog='groupadd') + parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true") + parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group") + parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults") + parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true") + parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group") + parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into") + parser.add_argument("-r", "--system", help="create a system account", action="store_true") + parser.add_argument("GROUP", help="Group name of 
the new group") + + # Return a list of configuration files based on either the default + # files/group or the contents of USERADD_GID_TABLES + # paths are resulved via BBPATH + def get_group_list(d): + str = "" + bbpath = d.getVar('BBPATH', True) + group_tables = d.getVar('USERADD_GID_TABLES', True) + if not group_tables: + group_tables = 'files/group' + for conf_file in group_tables.split(): + str += " %s" % bb.utils.which(bbpath, conf_file) + return str + + newparams = [] + for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params): + param = param.strip() + if not param: + continue + try: + # If we're processing multiple lines, we could have left over values here... + gaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param)) + except: + raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param)) + + # Need to iterate over layers and open the right file(s) + # Use the standard group layout: + # groupname:password:group_id:group_members + # + # If a field is left blank, the original value will be used. The 'groupname' field + # is required. + # + # Note: similar to the passwd file, the 'password' filed is ignored + # Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM + for conf in get_group_list(d).split()[::-1]: + if os.path.exists(conf): + f = open(conf, "r") + for line in f: + if line.startswith('#'): + continue + field = line.rstrip().split(":") + if field[0] == gaargs.GROUP and field[2]: + if gaargs.gid and (gaargs.gid != field[2]): + bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2])) + gaargs.gid = field[2] + break + + if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not (gaargs.gid and gaargs.gid.isdigit()): + #bb.error("Skipping recipe %s, package %s which adds groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP)) + raise bb.build.FuncFailed("%s - %s: Groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP)) + + # Reconstruct the args... 
+ newparam = ['', ' --force'][gaargs.force] + newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None] + newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None] + newparam += ['', ' --non-unique'][gaargs.non_unique] + newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None] + newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None] + newparam += ['', ' --system'][gaargs.system] + newparam += ' %s' % gaargs.GROUP + + newparams.append(newparam) + + return " ;".join(newparams).strip() + + # Load and process the users and groups, rewriting the adduser/addgroup params + useradd_packages = d.getVar('USERADD_PACKAGES', True) + + for pkg in useradd_packages.split(): + # Groupmems doesn't have anything we might want to change, so simply validating + # is a bit of a waste -- only process useradd/groupadd + useradd_param = d.getVar('USERADD_PARAM_%s' % pkg, True) + if useradd_param: + #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param)) + d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param)) + #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg, True))) + + groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg, True) + if groupadd_param: + #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param)) + d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param)) + #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg, True))) + + + +python __anonymous() { + if not bb.data.inherits_class('nativesdk', d) \ + and not bb.data.inherits_class('native', d): + try: + update_useradd_static_config(d) + except bb.build.FuncFailed as f: + bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN', True), f)) + raise bb.parse.SkipPackage(f) +} diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass new file mode 100644 index 0000000000..0b9a843b24 --- /dev/null +++ b/meta/classes/useradd.bbclass @@ -0,0 +1,213 @@ +inherit useradd_base + +# base-passwd-cross provides the default passwd and group files in the +# target sysroot, and shadow -native and -sysroot provide the utilities +# and support files needed to add and modify user and group accounts +DEPENDS_append = "${USERADDDEPENDS}" +USERADDDEPENDS = " base-files base-passwd shadow-native shadow-sysroot shadow" +USERADDDEPENDS_class-cross = "" +USERADDDEPENDS_class-native = "" +USERADDDEPENDS_class-nativesdk = "" + +# This preinstall function can be run in four different contexts: +# +# a) Before do_install +# b) At do_populate_sysroot_setscene when installing from sstate packages +# c) As the preinst script in the target package at do_rootfs time +# d) As the preinst script in the target package on device as a package upgrade +# +useradd_preinst () { +OPT="" +SYSROOT="" + +if test "x$D" != "x"; then + # Installing into a sysroot + SYSROOT="$D" + OPT="--root $D" + # user/group lookups should match useradd/groupadd --root + export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}" +fi + +# If we're not doing a special SSTATE/SYSROOT install +# then set the values, otherwise use the environment +if test "x$UA_SYSROOT" = "x"; then + # Installing onto a target + # Add groups and users defined only for this package + GROUPADD_PARAM="${GROUPADD_PARAM}" + USERADD_PARAM="${USERADD_PARAM}" + GROUPMEMS_PARAM="${GROUPMEMS_PARAM}" +fi + +# Perform group additions first, since user additions may depend +# on these groups existing +if test "x$GROUPADD_PARAM" != "x"; then + echo "Running groupadd commands..." 
+ # Invoke multiple instances of groupadd for parameter lists + # separated by ';' + opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1` + remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-` + while test "x$opts" != "x"; do + perform_groupadd "$SYSROOT" "$OPT $opts" 10 + if test "x$opts" = "x$remaining"; then + break + fi + opts=`echo "$remaining" | cut -d ';' -f 1` + remaining=`echo "$remaining" | cut -d ';' -f 2-` + done +fi + +if test "x$USERADD_PARAM" != "x"; then + echo "Running useradd commands..." + # Invoke multiple instances of useradd for parameter lists + # separated by ';' + opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1` + remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-` + while test "x$opts" != "x"; do + perform_useradd "$SYSROOT" "$OPT $opts" 10 + if test "x$opts" = "x$remaining"; then + break + fi + opts=`echo "$remaining" | cut -d ';' -f 1` + remaining=`echo "$remaining" | cut -d ';' -f 2-` + done +fi + +if test "x$GROUPMEMS_PARAM" != "x"; then + echo "Running groupmems commands..." + # Invoke multiple instances of groupmems for parameter lists + # separated by ';' + opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1` + remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-` + while test "x$opts" != "x"; do + perform_groupmems "$SYSROOT" "$OPT $opts" 10 + if test "x$opts" = "x$remaining"; then + break + fi + opts=`echo "$remaining" | cut -d ';' -f 1` + remaining=`echo "$remaining" | cut -d ';' -f 2-` + done +fi +} + +useradd_sysroot () { + # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running + # at this point so we're explicit about the environment so pseudo can load if + # not already present. + export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo" + + # Explicitly set $D since it isn't set to anything + # before do_install + D=${STAGING_DIR_TARGET} + + # Add groups and users defined for all recipe packages + GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}" + USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}" + GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}" + + # Tell the system to use the environment vars + UA_SYSROOT=1 + + useradd_preinst +} + +useradd_sysroot_sstate () { + if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ] + then + useradd_sysroot + fi +} + +do_install[prefuncs] += "${SYSROOTFUNC}" +SYSROOTFUNC = "useradd_sysroot" +SYSROOTFUNC_class-cross = "" +SYSROOTFUNC_class-native = "" +SYSROOTFUNC_class-nativesdk = "" +SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}" +SYSROOTPOSTFUNC = "useradd_sysroot_sstate" +SYSROOTPOSTFUNC_class-cross = "" +SYSROOTPOSTFUNC_class-native = "" +SYSROOTPOSTFUNC_class-nativesdk = "" + +USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene" +USERADDSETSCENEDEPS_class-cross = "" +USERADDSETSCENEDEPS_class-native = "" +USERADDSETSCENEDEPS_class-nativesdk = "" +do_package_setscene[depends] += "${USERADDSETSCENEDEPS}" +do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}" + +# Recipe parse-time sanity checks +def update_useradd_after_parse(d): + useradd_packages = d.getVar('USERADD_PACKAGES', True) + + if not useradd_packages: + raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE')) + + for pkg in useradd_packages.split(): + if not 
d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True): + bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE'), pkg)) + +python __anonymous() { + if not bb.data.inherits_class('nativesdk', d) \ + and not bb.data.inherits_class('native', d): + update_useradd_after_parse(d) +} + +# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the +# [group|user]add parameters for all USERADD_PACKAGES in this recipe +def get_all_cmd_params(d, cmd_type): + import string + + param_type = cmd_type.upper() + "_PARAM_%s" + params = [] + + useradd_packages = d.getVar('USERADD_PACKAGES', True) or "" + for pkg in useradd_packages.split(): + param = d.getVar(param_type % pkg, True) + if param: + params.append(param) + + return "; ".join(params) + +# Adds the preinst script into generated packages +fakeroot python populate_packages_prepend () { + def update_useradd_package(pkg): + bb.debug(1, 'adding user/group calls to preinst for %s' % pkg) + + """ + useradd preinst is appended here because pkg_preinst may be + required to execute on the target. Not doing so may cause + useradd preinst to be invoked twice, causing unwanted warnings. + """ + preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True) + if not preinst: + preinst = '#!/bin/sh\n' + preinst += 'bbnote () {\n%s}\n' % d.getVar('bbnote', True) + preinst += 'bbwarn () {\n%s}\n' % d.getVar('bbwarn', True) + preinst += 'bbfatal () {\n%s}\n' % d.getVar('bbfatal', True) + preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True) + preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True) + preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True) + preinst += d.getVar('useradd_preinst', True) + d.setVar('pkg_preinst_%s' % pkg, preinst) + + # RDEPENDS setup + rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or "" + rdepends += ' ' + d.getVar('MLPREFIX') + 'base-passwd' + rdepends += ' ' + d.getVar('MLPREFIX') + 'shadow' + # base-files is where the default /etc/skel is packaged + rdepends += ' ' + d.getVar('MLPREFIX') + 'base-files' + d.setVar("RDEPENDS_%s" % pkg, rdepends) + + # Add the user/group preinstall scripts and RDEPENDS requirements + # to packages specified by USERADD_PACKAGES + if not bb.data.inherits_class('nativesdk', d) \ + and not bb.data.inherits_class('native', d): + useradd_packages = d.getVar('USERADD_PACKAGES', True) or "" + for pkg in useradd_packages.split(): + update_useradd_package(pkg) +} + +# Use the following to extend the useradd with custom functions +USERADDEXTENSION ?= "" + +inherit ${USERADDEXTENSION} diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass new file mode 100644 index 0000000000..c47b1eb810 --- /dev/null +++ b/meta/classes/useradd_base.bbclass @@ -0,0 +1,230 @@ +# This bbclass provides basic functionality for user/group settings. +# This bbclass is intended to be inherited by useradd.bbclass and +# extrausers.bbclass. + +# The following functions basically have similar logic. +# *) Perform necessary checks before invoking the actual command +# *) Invoke the actual command, make retries if necessary +# *) Error out if an error occurs. + +# Note that before invoking these functions, make sure the global variable +# PSEUDO is set up correctly. 
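A hedged recipe-side sketch of the variables that feed useradd_preinst above and, via the ';'-separated loops there, the perform_* helpers defined below; package, user and group names are illustrative:

    inherit useradd
    USERADD_PACKAGES = "${PN}"
    USERADD_PARAM_${PN} = "--system --no-create-home --home-dir /var/lib/myd --shell /bin/false --user-group myd"
    GROUPADD_PARAM_${PN} = "--system netdev"

Multiple users or groups are separated with ';', which is exactly what the cut loops in useradd_preinst iterate over before handing each parameter set to perform_useradd or perform_groupadd with a retry count.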
+ +perform_groupadd () { + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing groupadd with [$opts] and $retries times of retry" + local groupname=`echo "$opts" | awk '{ print $NF }'` + local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`" + if test "x$group_exists" = "x"; then + local count=0 + while true; do + eval $PSEUDO groupadd $opts || true + group_exists="`grep "^$groupname:" $rootdir/etc/group || true`" + if test "x$group_exists" = "x"; then + bbwarn "groupadd command did not succeed. Retrying..." + else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + bbfatal "Tried running groupadd command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "group $groupname already exists, not re-creating it" + fi +} + +perform_useradd () { + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing useradd with [$opts] and $retries times of retry" + local username=`echo "$opts" | awk '{ print $NF }'` + local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`" + if test "x$user_exists" = "x"; then + local count=0 + while true; do + eval $PSEUDO useradd $opts || true + user_exists="`grep "^$username:" $rootdir/etc/passwd || true`" + if test "x$user_exists" = "x"; then + bbwarn "useradd command did not succeed. Retrying..." + else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + bbfatal "Tried running useradd command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "user $username already exists, not re-creating it" + fi +} + +perform_groupmems () { + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing groupmems with [$opts] and $retries times of retry" + local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'` + local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'` + bbnote "Running groupmems command with group $groupname and user $username" + # groupmems fails if /etc/gshadow does not exist + local gshadow="" + if [ -f $rootdir${sysconfdir}/gshadow ]; then + gshadow="yes" + else + gshadow="no" + touch $rootdir${sysconfdir}/gshadow + fi + local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`" + if test "x$mem_exists" = "x"; then + local count=0 + while true; do + eval $PSEUDO groupmems $opts || true + mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`" + if test "x$mem_exists" = "x"; then + bbwarn "groupmems command did not succeed. Retrying..." 
+ else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + if test "x$gshadow" = "xno"; then + rm -f $rootdir${sysconfdir}/gshadow + rm -f $rootdir${sysconfdir}/gshadow- + fi + bbfatal "Tried running groupmems command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "group $groupname already contains $username, not re-adding it" + fi + if test "x$gshadow" = "xno"; then + rm -f $rootdir${sysconfdir}/gshadow + rm -f $rootdir${sysconfdir}/gshadow- + fi +} + +perform_groupdel () { + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing groupdel with [$opts] and $retries times of retry" + local groupname=`echo "$opts" | awk '{ print $NF }'` + local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`" + if test "x$group_exists" != "x"; then + local count=0 + while true; do + eval $PSEUDO groupdel $opts || true + group_exists="`grep "^$groupname:" $rootdir/etc/group || true`" + if test "x$group_exists" != "x"; then + bbwarn "groupdel command did not succeed. Retrying..." + else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + bbfatal "Tried running groupdel command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "group $groupname doesn't exist, not removing it" + fi +} + +perform_userdel () { + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing userdel with [$opts] and $retries times of retry" + local username=`echo "$opts" | awk '{ print $NF }'` + local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`" + if test "x$user_exists" != "x"; then + local count=0 + while true; do + eval $PSEUDO userdel $opts || true + user_exists="`grep "^$username:" $rootdir/etc/passwd || true`" + if test "x$user_exists" != "x"; then + bbwarn "userdel command did not succeed. Retrying..." + else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + bbfatal "Tried running userdel command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "user $username doesn't exist, not removing it" + fi +} + +perform_groupmod () { + # Other than the return value of groupmod, there's no simple way to judge whether the command + # succeeds, so we disable -e option temporarily + set +e + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing groupmod with [$opts] and $retries times of retry" + local groupname=`echo "$opts" | awk '{ print $NF }'` + local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`" + if test "x$group_exists" != "x"; then + local count=0 + while true; do + eval $PSEUDO groupmod $opts + if test $? != 0; then + bbwarn "groupmod command did not succeed. Retrying..." + else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + bbfatal "Tried running groupmod command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "group $groupname doesn't exist, unable to modify it" + fi + set -e +} + +perform_usermod () { + # Same reason with groupmod, temporarily disable -e option + set +e + local rootdir="$1" + local opts="$2" + local retries="$3" + bbnote "Performing usermod with [$opts] and $retries times of retry" + local username=`echo "$opts" | awk '{ print $NF }'` + local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`" + if test "x$user_exists" != "x"; then + local count=0 + while true; do + eval $PSEUDO usermod $opts + if test $? 
!= 0; then + bbwarn "usermod command did not succeed. Retrying..." + else + break + fi + count=`expr $count + 1` + if test $count = $retries; then + bbfatal "Tried running usermod command $retries times without scucess, giving up" + fi + sleep $count + done + else + bbwarn "user $username doesn't exist, unable to modify it" + fi + set -e +} diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass new file mode 100644 index 0000000000..1792f18e8c --- /dev/null +++ b/meta/classes/utility-tasks.bbclass @@ -0,0 +1,69 @@ +addtask listtasks +do_listtasks[nostamp] = "1" +python do_listtasks() { + taskdescs = {} + maxlen = 0 + for e in d.keys(): + if d.getVarFlag(e, 'task'): + maxlen = max(maxlen, len(e)) + if e.endswith('_setscene'): + desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '') + else: + desc = d.getVarFlag(e, 'doc') or '' + taskdescs[e] = desc + + tasks = sorted(taskdescs.keys()) + for taskname in tasks: + bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname])) +} + +CLEANFUNCS ?= "" + +T_task-clean = "${LOG_DIR}/cleanlogs/${PN}" +addtask clean +do_clean[nostamp] = "1" +python do_clean() { + """clear the build and temp directories""" + dir = d.expand("${WORKDIR}") + bb.note("Removing " + dir) + oe.path.remove(dir) + + dir = "%s.*" % bb.data.expand(d.getVar('STAMP'), d) + bb.note("Removing " + dir) + oe.path.remove(dir) + + for f in (d.getVar('CLEANFUNCS', True) or '').split(): + bb.build.exec_func(f, d) +} + +addtask checkuri +do_checkuri[nostamp] = "1" +python do_checkuri() { + src_uri = (d.getVar('SRC_URI', True) or "").split() + if len(src_uri) == 0: + return + + localdata = bb.data.createCopy(d) + bb.data.update_data(localdata) + + try: + fetcher = bb.fetch2.Fetch(src_uri, localdata) + fetcher.checkstatus() + except bb.fetch2.BBFetchException, e: + raise bb.build.FuncFailed(e) +} + +addtask checkuriall after do_checkuri +do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri" +do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}" +do_checkuriall[nostamp] = "1" +do_checkuriall() { + : +} + +addtask fetchall after do_fetch +do_fetchall[recrdeptask] = "do_fetchall do_fetch" +do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}" +do_fetchall() { + : +} diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass new file mode 100644 index 0000000000..80e90e8777 --- /dev/null +++ b/meta/classes/utils.bbclass @@ -0,0 +1,379 @@ +# For compatibility +def base_path_join(a, *p): + return oe.path.join(a, *p) + +def base_path_relative(src, dest): + return oe.path.relative(src, dest) + +def base_path_out(path, d): + return oe.path.format_display(path, d) + +def base_read_file(filename): + return oe.utils.read_file(filename) + +def base_ifelse(condition, iftrue = True, iffalse = False): + return oe.utils.ifelse(condition, iftrue, iffalse) + +def base_conditional(variable, checkvalue, truevalue, falsevalue, d): + return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d) + +def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d): + return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d) + +def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d): + return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d) + +def base_contains(variable, checkvalues, truevalue, falsevalue, d): + return bb.utils.contains(variable, checkvalues, truevalue, falsevalue, d) + +def base_both_contain(variable1, variable2, checkvalue, d): + return 
oe.utils.both_contain(variable1, variable2, checkvalue, d) + +def base_prune_suffix(var, suffixes, d): + return oe.utils.prune_suffix(var, suffixes, d) + +def oe_filter(f, str, d): + return oe.utils.str_filter(f, str, d) + +def oe_filter_out(f, str, d): + return oe.utils.str_filter_out(f, str, d) + +def machine_paths(d): + """List any existing machine specific filespath directories""" + machine = d.getVar("MACHINE", True) + filespathpkg = d.getVar("FILESPATHPKG", True).split(":") + for basepath in d.getVar("FILESPATHBASE", True).split(":"): + for pkgpath in filespathpkg: + machinepath = os.path.join(basepath, pkgpath, machine) + if os.path.isdir(machinepath): + yield machinepath + +def is_machine_specific(d): + """Determine whether the current recipe is machine specific""" + machinepaths = set(machine_paths(d)) + srcuri = d.getVar("SRC_URI", True).split() + for url in srcuri: + fetcher = bb.fetch2.Fetch([srcuri], d) + if url.startswith("file://"): + if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths): + return True + +oe_soinstall() { + # Purpose: Install shared library file and + # create the necessary links + # Example: + # + # oe_ + # + #bbnote installing shared library $1 to $2 + # + libname=`basename $1` + install -m 755 $1 $2/$libname + sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'` + solink=`echo $libname | sed -e 's/\.so\..*/.so/'` + ln -sf $libname $2/$sonamelink + ln -sf $libname $2/$solink +} + +oe_libinstall() { + # Purpose: Install a library, in all its forms + # Example + # + # oe_libinstall libltdl ${STAGING_LIBDIR}/ + # oe_libinstall -C src/libblah libblah ${D}/${libdir}/ + dir="" + libtool="" + silent="" + require_static="" + require_shared="" + staging_install="" + while [ "$#" -gt 0 ]; do + case "$1" in + -C) + shift + dir="$1" + ;; + -s) + silent=1 + ;; + -a) + require_static=1 + ;; + -so) + require_shared=1 + ;; + -*) + bbfatal "oe_libinstall: unknown option: $1" + ;; + *) + break; + ;; + esac + shift + done + + libname="$1" + shift + destpath="$1" + if [ -z "$destpath" ]; then + bbfatal "oe_libinstall: no destination path specified" + fi + if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null + then + staging_install=1 + fi + + __runcmd () { + if [ -z "$silent" ]; then + echo >&2 "oe_libinstall: $*" + fi + $* + } + + if [ -z "$dir" ]; then + dir=`pwd` + fi + + dotlai=$libname.lai + + # Sanity check that the libname.lai is unique + number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l` + if [ $number_of_files -gt 1 ]; then + bbfatal "oe_libinstall: $dotlai is not unique in $dir" + fi + + + dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"` + olddir=`pwd` + __runcmd cd $dir + + lafile=$libname.la + + # If such file doesn't exist, try to cut version suffix + if [ ! 
-f "$lafile" ]; then + libname1=`echo "$libname" | sed 's/-[0-9.]*$//'` + lafile1=$libname.la + if [ -f "$lafile1" ]; then + libname=$libname1 + lafile=$lafile1 + fi + fi + + if [ -f "$lafile" ]; then + # libtool archive + eval `cat $lafile|grep "^library_names="` + libtool=1 + else + library_names="$libname.so* $libname.dll.a $libname.*.dylib" + fi + + __runcmd install -d $destpath/ + dota=$libname.a + if [ -f "$dota" -o -n "$require_static" ]; then + rm -f $destpath/$dota + __runcmd install -m 0644 $dota $destpath/ + fi + if [ -f "$dotlai" -a -n "$libtool" ]; then + rm -f $destpath/$libname.la + __runcmd install -m 0644 $dotlai $destpath/$libname.la + fi + + for name in $library_names; do + files=`eval echo $name` + for f in $files; do + if [ ! -e "$f" ]; then + if [ -n "$libtool" ]; then + bbfatal "oe_libinstall: $dir/$f not found." + fi + elif [ -L "$f" ]; then + __runcmd cp -P "$f" $destpath/ + elif [ ! -L "$f" ]; then + libfile="$f" + rm -f $destpath/$libfile + __runcmd install -m 0755 $libfile $destpath/ + fi + done + done + + if [ -z "$libfile" ]; then + if [ -n "$require_shared" ]; then + bbfatal "oe_libinstall: unable to locate shared library" + fi + elif [ -z "$libtool" ]; then + # special case hack for non-libtool .so.#.#.# links + baselibfile=`basename "$libfile"` + if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then + sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'` + solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'` + if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then + __runcmd ln -sf $baselibfile $destpath/$sonamelink + fi + __runcmd ln -sf $baselibfile $destpath/$solink + fi + fi + + __runcmd cd "$olddir" +} + +oe_machinstall() { + # Purpose: Install machine dependent files, if available + # If not available, check if there is a default + # If no default, just touch the destination + # Example: + # $1 $2 $3 $4 + # oe_machinstall -m 0644 fstab ${D}/etc/fstab + # + # TODO: Check argument number? + # + filename=`basename $3` + dirname=`dirname $3` + + for o in `echo ${OVERRIDES} | tr ':' ' '`; do + if [ -e $dirname/$o/$filename ]; then + bbnote $dirname/$o/$filename present, installing to $4 + install $1 $2 $dirname/$o/$filename $4 + return + fi + done +# bbnote overrides specific file NOT present, trying default=$3... + if [ -e $3 ]; then + bbnote $3 present, installing to $4 + install $1 $2 $3 $4 + else + bbnote $3 NOT present, touching empty $4 + touch $4 + fi +} + +create_cmdline_wrapper () { + # Create a wrapper script where commandline options are needed + # + # These are useful to work around relocation issues, by passing extra options + # to a program + # + # Usage: create_cmdline_wrapper FILENAME + + cmd=$1 + shift + + echo "Generating wrapper script for $cmd" + + mv $cmd $cmd.real + cmdname=`basename $cmd` + cat <$cmd +#!/bin/bash +realpath=\`readlink -fn \$0\` +exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $@ "\$@" +END + chmod +x $cmd +} + +create_wrapper () { + # Create a wrapper script where extra environment variables are needed + # + # These are useful to work around relocation issues, by setting environment + # variables which point to paths in the filesystem. + # + # Usage: create_wrapper FILENAME [[VAR=VALUE]..] 
+ + cmd=$1 + shift + + echo "Generating wrapper script for $cmd" + + mv $cmd $cmd.real + cmdname=`basename $cmd` + cat <$cmd +#!/bin/bash +realpath=\`readlink -fn \$0\` +export $@ +exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@" +END + chmod +x $cmd +} + +# Copy files/directories from $1 to $2 but using hardlinks +# (preserve symlinks) +hardlinkdir () { + from=$1 + to=$2 + (cd $from; find . -print0 | cpio --null -pdlu $to) +} + + +def check_app_exists(app, d): + app = d.expand(app) + path = d.getVar('PATH', d, True) + return bool(bb.utils.which(path, app)) + +def explode_deps(s): + return bb.utils.explode_deps(s) + +def base_set_filespath(path, d): + filespath = [] + extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "") + # Remove default flag which was used for checking + extrapaths = extrapaths.replace("__default:", "") + # Don't prepend empty strings to the path list + if extrapaths != "": + path = extrapaths.split(":") + path + # The ":" ensures we have an 'empty' override + overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":") + overrides.reverse() + for o in overrides: + for p in path: + if p != "": + filespath.append(os.path.join(p, o)) + return ":".join(filespath) + +def extend_variants(d, var, extend, delim=':'): + """Return a string of all bb class extend variants for the given extend""" + variants = [] + whole = d.getVar(var, True) or "" + for ext in whole.split(): + eext = ext.split(delim) + if len(eext) > 1 and eext[0] == extend: + variants.append(eext[1]) + return " ".join(variants) + +def multilib_pkg_extend(d, pkg): + variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split() + if not variants: + return pkg + pkgs = pkg + for v in variants: + pkgs = pkgs + " " + v + "-" + pkg + return pkgs + +def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '): + """Return a string of all ${var} in all multilib tune configuration""" + values = [] + value = d.getVar(var, True) or "" + if value != "": + if need_split: + for item in value.split(delim): + values.append(item) + else: + values.append(value) + variants = d.getVar("MULTILIB_VARIANTS", True) or "" + for item in variants.split(): + localdata = bb.data.createCopy(d) + overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item + localdata.setVar("OVERRIDES", overrides) + bb.data.update_data(localdata) + value = localdata.getVar(var, True) or "" + if value != "": + if need_split: + for item in value.split(delim): + values.append(item) + else: + values.append(value) + if unique: + #we do this to keep order as much as possible + ret = [] + for value in values: + if not value in ret: + ret.append(value) + else: + ret = values + return " ".join(ret) diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass new file mode 100644 index 0000000000..0b7803b251 --- /dev/null +++ b/meta/classes/vala.bbclass @@ -0,0 +1,21 @@ +# Vala has problems with multiple concurrent invocations +PARALLEL_MAKE = "" + +# Everyone needs vala-native and targets need vala, too, +# because that is where target builds look for .vapi files. 
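The create_wrapper and create_cmdline_wrapper helpers from utils.bbclass above are typically called from do_install; a hedged sketch with a hypothetical binary and environment variable:

    do_install_append () {
        create_wrapper ${D}${bindir}/mytool MYTOOL_DATADIR=${STAGING_DATADIR_NATIVE}/mytool
    }

The original binary is moved aside to mytool.real and the generated shell wrapper exports the given variables before exec'ing it, which is how relocation issues are papered over.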
+# +VALADEPENDS = "" +VALADEPENDS_class-target = "vala" +DEPENDS_append = " vala-native ${VALADEPENDS}" + +# Our patched version of Vala looks in STAGING_DATADIR for .vapi files +export STAGING_DATADIR +# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files +export XDG_DATA_DIRS = "${STAGING_DATADIR}" + +# Package additional files +FILES_${PN}-dev += "\ + ${datadir}/vala/vapi/*.vapi \ + ${datadir}/vala/vapi/*.deps \ + ${datadir}/gir-1.0 \ +" diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass new file mode 100644 index 0000000000..3a221e7082 --- /dev/null +++ b/meta/classes/waf.bbclass @@ -0,0 +1,13 @@ +waf_do_configure() { + ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF} +} + +waf_do_compile() { + ${S}/waf build ${PARALLEL_MAKE} +} + +waf_do_install() { + ${S}/waf install --destdir=${D} +} + +EXPORT_FUNCTIONS do_configure do_compile do_install -- cgit v1.2.3-54-g00ecf
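A hedged sketch of a recipe using the new waf.bbclass; the configure option shown is hypothetical:

    inherit waf

    EXTRA_OECONF += "--disable-docs"

EXPORT_FUNCTIONS maps waf_do_configure/waf_do_compile/waf_do_install onto the recipe's do_configure, do_compile and do_install, so ${S}/waf is invoked with --prefix=${prefix} and EXTRA_OECONF at configure time and with --destdir=${D} at install time.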