Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 32
-rw-r--r--  meta/classes/archive-configured-source.bbclass | 67
-rw-r--r--  meta/classes/archive-original-source.bbclass | 67
-rw-r--r--  meta/classes/archive-patched-source.bbclass | 67
-rw-r--r--  meta/classes/archiver.bbclass | 569
-rw-r--r--  meta/classes/autotools.bbclass | 248
-rw-r--r--  meta/classes/autotools_stage.bbclass | 2
-rw-r--r--  meta/classes/base.bbclass | 654
-rw-r--r--  meta/classes/bin_package.bbclass | 36
-rw-r--r--  meta/classes/binconfig.bbclass | 59
-rw-r--r--  meta/classes/blacklist.bbclass | 45
-rw-r--r--  meta/classes/boot-directdisk.bbclass | 168
-rw-r--r--  meta/classes/bootimg.bbclass | 235
-rw-r--r--  meta/classes/bugzilla.bbclass | 187
-rw-r--r--  meta/classes/buildhistory.bbclass | 628
-rw-r--r--  meta/classes/buildstats.bbclass | 282
-rw-r--r--  meta/classes/ccache.bbclass | 8
-rw-r--r--  meta/classes/chrpath.bbclass | 135
-rw-r--r--  meta/classes/clutter.bbclass | 23
-rw-r--r--  meta/classes/cmake.bbclass | 118
-rw-r--r--  meta/classes/cml1.bbclass | 40
-rw-r--r--  meta/classes/copyleft_compliance.bbclass | 67
-rw-r--r--  meta/classes/core-image.bbclass | 80
-rw-r--r--  meta/classes/cpan-base.bbclass | 51
-rw-r--r--  meta/classes/cpan.bbclass | 55
-rw-r--r--  meta/classes/cpan_build.bbclass | 51
-rw-r--r--  meta/classes/cross-canadian.bbclass | 97
-rw-r--r--  meta/classes/cross.bbclass | 80
-rw-r--r--  meta/classes/crosssdk.bbclass | 35
-rw-r--r--  meta/classes/debian.bbclass | 125
-rw-r--r--  meta/classes/deploy.bbclass | 11
-rw-r--r--  meta/classes/devshell.bbclass | 33
-rw-r--r--  meta/classes/distro_features_check.bbclass | 28
-rw-r--r--  meta/classes/distrodata.bbclass | 925
-rw-r--r--  meta/classes/distutils-base.bbclass | 5
-rw-r--r--  meta/classes/distutils-common-base.bbclass | 24
-rw-r--r--  meta/classes/distutils-native-base.bbclass | 3
-rw-r--r--  meta/classes/distutils.bbclass | 79
-rw-r--r--  meta/classes/externalsrc.bbclass | 68
-rw-r--r--  meta/classes/extrausers.bbclass | 61
-rw-r--r--  meta/classes/fontcache.bbclass | 36
-rw-r--r--  meta/classes/gconf.bbclass | 70
-rw-r--r--  meta/classes/gettext.bbclass | 19
-rw-r--r--  meta/classes/gnome.bbclass | 3
-rw-r--r--  meta/classes/gnomebase.bbclass | 30
-rw-r--r--  meta/classes/grub-efi.bbclass | 140
-rw-r--r--  meta/classes/gsettings.bbclass | 37
-rw-r--r--  meta/classes/gtk-doc.bbclass | 23
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 62
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 83
-rw-r--r--  meta/classes/gzipnative.bbclass | 5
-rw-r--r--  meta/classes/icecc.bbclass | 255
-rw-r--r--  meta/classes/image-empty.bbclass | 0
-rw-r--r--  meta/classes/image-live.bbclass | 15
-rw-r--r--  meta/classes/image-mklibs.bbclass | 73
-rw-r--r--  meta/classes/image-prelink.bbclass | 35
-rw-r--r--  meta/classes/image-swab.bbclass | 94
-rw-r--r--  meta/classes/image-vmdk.bbclass | 32
-rw-r--r--  meta/classes/image.bbclass | 661
-rw-r--r--  meta/classes/image_types.bbclass | 251
-rw-r--r--  meta/classes/image_types_uboot.bbclass | 23
-rw-r--r--  meta/classes/insane.bbclass | 954
-rw-r--r--  meta/classes/insserv.bbclass | 5
-rw-r--r--  meta/classes/kernel-arch.bbclass | 58
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 185
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 413
-rw-r--r--  meta/classes/kernel.bbclass | 474
-rw-r--r--  meta/classes/lib_package.bbclass | 7
-rw-r--r--  meta/classes/libc-common.bbclass | 36
-rw-r--r--  meta/classes/libc-package.bbclass | 389
-rw-r--r--  meta/classes/license.bbclass | 359
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 32
-rw-r--r--  meta/classes/logging.bbclass | 72
-rw-r--r--  meta/classes/meta.bbclass | 4
-rw-r--r--  meta/classes/metadata_scm.bbclass | 80
-rw-r--r--  meta/classes/migrate_localcount.bbclass | 46
-rw-r--r--  meta/classes/mime.bbclass | 58
-rw-r--r--  meta/classes/mirrors.bbclass | 68
-rw-r--r--  meta/classes/module-base.bbclass | 23
-rw-r--r--  meta/classes/module.bbclass | 32
-rw-r--r--  meta/classes/module_strip.bbclass | 0
-rw-r--r--  meta/classes/multilib.bbclass | 140
-rw-r--r--  meta/classes/multilib_global.bbclass | 47
-rw-r--r--  meta/classes/multilib_header.bbclass | 47
-rw-r--r--  meta/classes/native.bbclass | 163
-rw-r--r--  meta/classes/nativesdk.bbclass | 92
-rw-r--r--  meta/classes/oelint.bbclass | 174
-rw-r--r--  meta/classes/own-mirrors.bbclass | 12
-rw-r--r--  meta/classes/package.bbclass | 1983
-rw-r--r--  meta/classes/package_deb.bbclass | 449
-rw-r--r--  meta/classes/package_ipk.bbclass | 443
-rw-r--r--  meta/classes/package_rpm.bbclass | 1201
-rw-r--r--  meta/classes/package_tar.bbclass | 68
-rw-r--r--  meta/classes/packagedata.bbclass | 26
-rw-r--r--  meta/classes/packagegroup.bbclass | 47
-rw-r--r--  meta/classes/packageinfo.bbclass | 22
-rw-r--r--  meta/classes/patch.bbclass | 187
-rw-r--r--  meta/classes/perlnative.bbclass | 3
-rw-r--r--  meta/classes/pixbufcache.bbclass | 69
-rw-r--r--  meta/classes/pkg_distribute.bbclass | 29
-rw-r--r--  meta/classes/pkg_metainfo.bbclass | 22
-rw-r--r--  meta/classes/pkgconfig.bbclass | 2
-rw-r--r--  meta/classes/populate_sdk.bbclass | 7
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 337
-rw-r--r--  meta/classes/populate_sdk_deb.bbclass | 95
-rw-r--r--  meta/classes/populate_sdk_ipk.bbclass | 80
-rw-r--r--  meta/classes/populate_sdk_rpm.bbclass | 172
-rw-r--r--  meta/classes/prexport.bbclass | 58
-rw-r--r--  meta/classes/primport.bbclass | 21
-rw-r--r--  meta/classes/prserv.bbclass | 33
-rw-r--r--  meta/classes/ptest.bbclass | 59
-rw-r--r--  meta/classes/python-dir.bbclass | 3
-rw-r--r--  meta/classes/pythonnative.bbclass | 3
-rw-r--r--  meta/classes/qemu.bbclass | 35
-rw-r--r--  meta/classes/qmake2.bbclass | 27
-rw-r--r--  meta/classes/qmake_base.bbclass | 119
-rw-r--r--  meta/classes/qt4e.bbclass | 24
-rw-r--r--  meta/classes/qt4x11.bbclass | 14
-rw-r--r--  meta/classes/recipe_sanity.bbclass | 168
-rw-r--r--  meta/classes/relocatable.bbclass | 7
-rw-r--r--  meta/classes/rm_work.bbclass | 99
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 137
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 188
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 224
-rw-r--r--  meta/classes/sanity.bbclass | 756
-rw-r--r--  meta/classes/scons.bbclass | 15
-rw-r--r--  meta/classes/sdl.bbclass | 6
-rw-r--r--  meta/classes/setuptools.bbclass | 9
-rw-r--r--  meta/classes/sip.bbclass | 63
-rw-r--r--  meta/classes/siteconfig.bbclass | 33
-rw-r--r--  meta/classes/siteinfo.bbclass | 149
-rw-r--r--  meta/classes/spdx.bbclass | 321
-rw-r--r--  meta/classes/sstate.bbclass | 737
-rw-r--r--  meta/classes/staging.bbclass | 113
-rw-r--r--  meta/classes/syslinux.bbclass | 181
-rw-r--r--  meta/classes/systemd.bbclass | 193
-rw-r--r--  meta/classes/terminal.bbclass | 88
-rw-r--r--  meta/classes/testimage-auto.bbclass | 23
-rw-r--r--  meta/classes/testimage.bbclass | 179
-rw-r--r--  meta/classes/tinderclient.bbclass | 368
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 188
-rw-r--r--  meta/classes/typecheck.bbclass | 12
-rw-r--r--  meta/classes/uboot-config.bbclass | 59
-rw-r--r--  meta/classes/update-alternatives.bbclass | 267
-rw-r--r--  meta/classes/update-rc.d.bbclass | 103
-rw-r--r--  meta/classes/useradd.bbclass | 193
-rw-r--r--  meta/classes/useradd_base.bbclass | 230
-rw-r--r--  meta/classes/utility-tasks.bbclass | 62
-rw-r--r--  meta/classes/utils.bbclass | 368
-rw-r--r--  meta/classes/vala.bbclass | 18
-rw-r--r--  meta/classes/waf.bbclass | 13
 151 files changed, 22673 insertions(+), 0 deletions(-)
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
new file mode 100644
index 0000000000..5e13a5b8a8
--- /dev/null
+++ b/meta/classes/allarch.bbclass
@@ -0,0 +1,32 @@
+#
+# This class is used for architecture independent recipes/data files (usually scripts)
+#
+
+# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
+# point elsewhere after these changes.
+STAGING_DIR_HOST := "${STAGING_DIR_HOST}"
+
+PACKAGE_ARCH = "all"
+
+python () {
+    # Allow this class to be included but overridden - only set
+    # the values if we're still "all" package arch.
+    if d.getVar("PACKAGE_ARCH") == "all":
+        # No need for virtual/libc or a cross compiler
+        d.setVar("INHIBIT_DEFAULT_DEPS","1")
+
+        # Set these to a common set of values, we shouldn't be using them
+        # other than for WORKDIR directory naming anyway
+        d.setVar("TARGET_ARCH", "allarch")
+        d.setVar("TARGET_OS", "linux")
+        d.setVar("TARGET_CC_ARCH", "none")
+        d.setVar("TARGET_LD_ARCH", "none")
+        d.setVar("TARGET_AS_ARCH", "none")
+        d.setVar("PACKAGE_EXTRA_ARCHS", "")
+
+        # No need to do shared library processing or debug symbol handling
+        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
+        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
+        d.setVar("INHIBIT_PACKAGE_STRIP", "1")
+}
+
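
For context, a recipe opts into the class above with a plain "inherit". A
minimal sketch of such a recipe (the recipe name, license checksum and file
list here are hypothetical, not part of this commit):

    SUMMARY = "Example architecture-independent helper script"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"

    SRC_URI = "file://example.sh"

    inherit allarch

    do_install() {
        install -d ${D}${bindir}
        install -m 0755 ${WORKDIR}/example.sh ${D}${bindir}
    }
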
diff --git a/meta/classes/archive-configured-source.bbclass b/meta/classes/archive-configured-source.bbclass
new file mode 100644
index 0000000000..54c234406a
--- /dev/null
+++ b/meta/classes/archive-configured-source.bbclass
@@ -0,0 +1,67 @@
+# This class archives packages with configured sources (it archives ${S}
+# after the configure stage), archives logs (the 'temp' directory, after
+# do_package_write_rpm), dumps data and creates a diff file (it dumps all
+# environment variables and functions used in the build and maps all
+# content in ${S}, including patches, to xxx.diff.gz). All archived
+# packages are deployed in ${DEPLOY_DIR}/sources.
+
+inherit archiver
+
+# Archive the configured sources, including patches
+addtask do_archive_configured_sources after do_configure
+
+# Archive the logs (temp) and scripts (.bb and .inc files)
+addtask do_archive_scripts_logs
+
+# Get dump data and create diff file
+addtask do_dumpdata_create_diff_gz
+
+python () {
+    pn = d.getVar('PN', True)
+    packaging = d.getVar('IMAGE_PKGTYPE', True)
+
+    if tar_filter(d):
+        return
+
+    d.appendVarFlag('do_dumpdata_create_diff_gz', 'depends', ' %s:do_package_write_' % pn + packaging)
+    build_deps = ' %s:do_dumpdata_create_diff_gz' % pn
+
+    if d.getVar('SOURCE_ARCHIVE_LOG_WITH_SCRIPTS', True) == 'logs_with_scripts':
+        d.appendVarFlag('do_archive_scripts_logs', 'depends', ' %s:do_package_write_' % pn + packaging)
+        build_deps += ' %s:do_archive_scripts_logs' % pn
+
+    if not not_tarball(d):
+        d.appendVarFlag('do_compile', 'depends', ' %s:do_archive_configured_sources' % pn)
+        build_deps += ' %s:do_archive_configured_sources' % pn
+
+    if bb.data.inherits_class('image', d):
+        d.appendVarFlag('do_rootfs', 'depends', build_deps)
+    else:
+        d.appendVarFlag('do_build', 'depends', build_deps)
+}
+
+ARCHIVE_SSTATE_OUTDIR = "${DEPLOY_DIR}/sources/"
+ARCHIVE_SSTATE_SCRIPTS_LOGS_INDIR = "${WORKDIR}/script-logs/"
+ARCHIVE_SSTATE_DIFFGZ_ENVDATA_INDIR = "${WORKDIR}/diffgz-envdata/"
+
+SSTATETASKS += "do_archive_scripts_logs"
+do_archive_scripts_logs[sstate-name] = "archive_scripts_logs"
+do_archive_scripts_logs[sstate-inputdirs] = "${ARCHIVE_SSTATE_SCRIPTS_LOGS_INDIR}"
+do_archive_scripts_logs[sstate-outputdirs] = "${ARCHIVE_SSTATE_OUTDIR}"
+
+python do_archive_scripts_logs_setscene () {
+    sstate_setscene(d)
+}
+
+addtask do_archive_scripts_logs_setscene
+
+SSTATETASKS += "do_dumpdata_create_diff_gz"
+do_dumpdata_create_diff_gz[sstate-name] = "dumpdata_create_diff_gz"
+do_dumpdata_create_diff_gz[sstate-inputdirs] = "${ARCHIVE_SSTATE_DIFFGZ_ENVDATA_INDIR}"
+do_dumpdata_create_diff_gz[sstate-outputdirs] = "${ARCHIVE_SSTATE_OUTDIR}"
+
+python do_dumpdata_create_diff_gz_setscene () {
+    sstate_setscene(d)
+}
+
+addtask do_dumpdata_create_diff_gz_setscene
diff --git a/meta/classes/archive-original-source.bbclass b/meta/classes/archive-original-source.bbclass
new file mode 100644
index 0000000000..7e9ed6d111
--- /dev/null
+++ b/meta/classes/archive-original-source.bbclass
@@ -0,0 +1,67 @@
+# This class archives packages with original sources (it archives ${S}
+# after the unpack stage) and patches, archives logs (the 'temp' directory,
+# after do_package_write_rpm), dumps data and creates a diff file (it dumps
+# all environment variables and functions used in the build and maps all
+# content in ${S}, including patches, to xxx.diff.gz). All archived
+# packages are deployed in ${DEPLOY_DIR}/sources.
+
+inherit archiver
+
+# Archive the original sources together with the patches
+addtask do_archive_original_sources_patches after do_unpack
+
+# Archive the logs (temp) and scripts (.bb and .inc files)
+addtask do_archive_scripts_logs
+
+# Get dump data and create diff file
+addtask do_dumpdata_create_diff_gz
+
+python () {
+    pn = d.getVar('PN', True)
+    packaging = d.getVar('IMAGE_PKGTYPE', True)
+
+    if tar_filter(d):
+        return
+
+    d.appendVarFlag('do_dumpdata_create_diff_gz', 'depends', ' %s:do_package_write_' % pn + packaging)
+    build_deps = ' %s:do_dumpdata_create_diff_gz' % pn
+
+    if d.getVar('SOURCE_ARCHIVE_LOG_WITH_SCRIPTS', True) == 'logs_with_scripts':
+        d.appendVarFlag('do_archive_scripts_logs', 'depends', ' %s:do_package_write_' % pn + packaging)
+        build_deps += ' %s:do_archive_scripts_logs' % pn
+
+    if not not_tarball(d):
+        d.appendVarFlag('do_patch', 'depends', ' %s:do_archive_original_sources_patches' % pn)
+        build_deps += ' %s:do_archive_original_sources_patches' % pn
+
+    if bb.data.inherits_class('image', d):
+        d.appendVarFlag('do_rootfs', 'depends', build_deps)
+    else:
+        d.appendVarFlag('do_build', 'depends', build_deps)
+}
+
+ARCHIVE_SSTATE_OUTDIR = "${DEPLOY_DIR}/sources/"
+ARCHIVE_SSTATE_SCRIPTS_LOGS_INDIR = "${WORKDIR}/script-logs/"
+ARCHIVE_SSTATE_DIFFGZ_ENVDATA_INDIR = "${WORKDIR}/diffgz-envdata/"
+
+SSTATETASKS += "do_archive_scripts_logs"
+do_archive_scripts_logs[sstate-name] = "archive_scripts_logs"
+do_archive_scripts_logs[sstate-inputdirs] = "${ARCHIVE_SSTATE_SCRIPTS_LOGS_INDIR}"
+do_archive_scripts_logs[sstate-outputdirs] = "${ARCHIVE_SSTATE_OUTDIR}"
+
+python do_archive_scripts_logs_setscene () {
+    sstate_setscene(d)
+}
+
+addtask do_archive_scripts_logs_setscene
+
+SSTATETASKS += "do_dumpdata_create_diff_gz"
+do_dumpdata_create_diff_gz[sstate-name] = "dumpdata_create_diff_gz"
+do_dumpdata_create_diff_gz[sstate-inputdirs] = "${ARCHIVE_SSTATE_DIFFGZ_ENVDATA_INDIR}"
+do_dumpdata_create_diff_gz[sstate-outputdirs] = "${ARCHIVE_SSTATE_OUTDIR}"
+
+python do_dumpdata_create_diff_gz_setscene () {
+    sstate_setscene(d)
+}
+
+addtask do_dumpdata_create_diff_gz_setscene
diff --git a/meta/classes/archive-patched-source.bbclass b/meta/classes/archive-patched-source.bbclass
new file mode 100644
index 0000000000..d8d494a45d
--- /dev/null
+++ b/meta/classes/archive-patched-source.bbclass
@@ -0,0 +1,67 @@
+# This class archives packages with patched sources (it archives ${S}
+# after the do_patch stage), archives logs (the 'temp' directory, after
+# do_package_write_rpm), dumps data and creates a diff file (it dumps all
+# environment variables and functions used in the build and maps all
+# content in ${S}, including patches, to xxx.diff.gz). All archived
+# packages are deployed in ${DEPLOY_DIR}/sources.
+
+inherit archiver
+
+# Archive the patched sources, including patches
+addtask do_archive_patched_sources after do_patch
+
+# Archive the logs (temp) and scripts (.bb and .inc files)
+addtask do_archive_scripts_logs
+
+# Get dump data and create diff file
+addtask do_dumpdata_create_diff_gz
+
+python () {
+    pn = d.getVar('PN', True)
+    packaging = d.getVar('IMAGE_PKGTYPE', True)
+
+    if tar_filter(d):
+        return
+
+    d.appendVarFlag('do_dumpdata_create_diff_gz', 'depends', ' %s:do_package_write_' % pn + packaging)
+    build_deps = ' %s:do_dumpdata_create_diff_gz' % pn
+
+    if d.getVar('SOURCE_ARCHIVE_LOG_WITH_SCRIPTS', True) == 'logs_with_scripts':
+        d.appendVarFlag('do_archive_scripts_logs', 'depends', ' %s:do_package_write_' % pn + packaging)
+        build_deps += ' %s:do_archive_scripts_logs' % pn
+
+    if not not_tarball(d):
+        d.appendVarFlag('do_configure', 'depends', ' %s:do_archive_patched_sources' % pn)
+        build_deps += ' %s:do_archive_patched_sources' % pn
+
+    if bb.data.inherits_class('image', d):
+        d.appendVarFlag('do_rootfs', 'depends', build_deps)
+    else:
+        d.appendVarFlag('do_build', 'depends', build_deps)
+}
+
+ARCHIVE_SSTATE_OUTDIR = "${DEPLOY_DIR}/sources/"
+ARCHIVE_SSTATE_SCRIPTS_LOGS_INDIR = "${WORKDIR}/script-logs/"
+ARCHIVE_SSTATE_DIFFGZ_ENVDATA_INDIR = "${WORKDIR}/diffgz-envdata/"
+
+SSTATETASKS += "do_archive_scripts_logs"
+do_archive_scripts_logs[sstate-name] = "archive_scripts_logs"
+do_archive_scripts_logs[sstate-inputdirs] = "${ARCHIVE_SSTATE_SCRIPTS_LOGS_INDIR}"
+do_archive_scripts_logs[sstate-outputdirs] = "${ARCHIVE_SSTATE_OUTDIR}"
+
+python do_archive_scripts_logs_setscene () {
+    sstate_setscene(d)
+}
+
+addtask do_archive_scripts_logs_setscene
+
+SSTATETASKS += "do_dumpdata_create_diff_gz"
+do_dumpdata_create_diff_gz[sstate-name] = "dumpdata_create_diff_gz"
+do_dumpdata_create_diff_gz[sstate-inputdirs] = "${ARCHIVE_SSTATE_DIFFGZ_ENVDATA_INDIR}"
+do_dumpdata_create_diff_gz[sstate-outputdirs] = "${ARCHIVE_SSTATE_OUTDIR}"
+
+python do_dumpdata_create_diff_gz_setscene () {
+    sstate_setscene(d)
+}
+
+addtask do_dumpdata_create_diff_gz_setscene
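
The three archive-*-source variants above differ only in which stage's tree
they archive (after do_unpack, after do_patch, or after do_configure), so
only one of them would be enabled at a time. A hedged sketch of what that
selection could look like in a build's local.conf (the variant and value
chosen here are arbitrary; see the archiver class that follows for the
variables it reads):

    INHERIT += "archive-patched-source"

    # Archive only the task logs instead of logs plus .bb/.inc files
    # (the default is 'logs_with_scripts')
    SOURCE_ARCHIVE_LOG_WITH_SCRIPTS = "logs"
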
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
new file mode 100644
index 0000000000..66efe7d54b
--- /dev/null
+++ b/meta/classes/archiver.bbclass
@@ -0,0 +1,569 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This file is used for archiving sources, patches, and logs to a
+# tarball. It also outputs the build environment to xxx.dump.data and
+# creates xxx.diff.gz to record all content in ${S} in a diff file.
+#
+
+ARCHIVE_EXCLUDE_FROM ?= ".pc autom4te.cache"
+ARCHIVE_TYPE ?= "tar srpm"
+PATCHES_ARCHIVE_WITH_SERIES = 'yes'
+SOURCE_ARCHIVE_LOG_WITH_SCRIPTS ?= '${@d.getVarFlag('ARCHIVER_MODE', 'log_type') \
+    if d.getVarFlag('ARCHIVER_MODE', 'log_type') != 'none' else 'logs_with_scripts'}'
+SOURCE_ARCHIVE_PACKAGE_TYPE ?= '${@d.getVarFlag('ARCHIVER_MODE', 'type') \
+    if d.getVarFlag('ARCHIVER_MODE', 'type') != 'none' else 'tar'}'
+FILTER ?= '${@d.getVarFlag('ARCHIVER_MODE', 'filter') \
+    if d.getVarFlag('ARCHIVER_MODE', 'filter') != 'none' else 'no'}'
+
+
+COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
+COPYLEFT_LICENSE_INCLUDE[type] = 'list'
+COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
+
+COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
+COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
+COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
+
+COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
+COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
+
+COPYLEFT_RECIPE_TYPES ?= 'target'
+COPYLEFT_RECIPE_TYPES[type] = 'list'
+COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
+
+COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
+COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
+COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
+
+def copyleft_recipe_type(d):
+    for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
+        if oe.utils.inherits(d, recipe_type):
+            return recipe_type
+    return 'target'
+
+def copyleft_should_include(d):
+    """
+    Determine if this recipe's sources should be deployed for compliance
+    """
+    import ast
+    import oe.license
+    from fnmatch import fnmatchcase as fnmatch
+
+    recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
+    if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
+        return False, 'recipe type "%s" is excluded' % recipe_type
+
+    include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
+    exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
+
+    try:
+        is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
+    except oe.license.LicenseError as exc:
+        bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+    else:
+        if is_included:
+            if reason:
+                return True, 'recipe has included licenses: %s' % ', '.join(reason)
+            else:
+                return False, 'recipe does not include a copyleft license'
+        else:
+            return False, 'recipe has excluded licenses: %s' % ', '.join(reason)
+
+def tar_filter(d):
+    """
+    Only archive packages that belong to COPYLEFT_LICENSE_INCLUDE and
+    ignore those in COPYLEFT_LICENSE_EXCLUDE. Don't exclude any
+    packages when "FILTER" is "no".
+    """
+    if d.getVar('FILTER', True) == "yes":
+        included, reason = copyleft_should_include(d)
+        return not included
+    else:
+        return False
+
+def get_bb_inc(d):
+    """
+    create a directory "script-logs" in ${WORKDIR} containing the .bb and .inc files
+    """
+    import re
+    import shutil
+
+    bbinc = []
+    pat = re.compile('require\s*([^\s]*\.*)(.*)')
+    work_dir = d.getVar('WORKDIR', True)
+    bbfile = d.getVar('FILE', True)
+    bbdir = os.path.dirname(bbfile)
+    target_sys = d.getVar('TARGET_SYS', True)
+    pf = d.getVar('PF', True)
+    licenses = get_licenses(d)
+    script_logs = os.path.join(work_dir, 'script-logs/'+ target_sys + '/' + licenses + '/' + pf + '/script-logs')
+    bb_inc = os.path.join(script_logs, 'bb_inc')
+    bb.utils.mkdirhier(bb_inc)
+
+    def find_file(dir, file):
+        for root, dirs, files in os.walk(dir):
+            if file in files:
+                return os.path.join(root, file)
+
+    def get_inc(file):
+        f = open(file, 'r')
+        for line in f.readlines():
+            if 'require' not in line:
+                bbinc.append(file)
+            else:
+                try:
+                    incfile = pat.match(line).group(1)
+                    incfile = bb.data.expand(os.path.basename(incfile), d)
+                    abs_incfile = find_file(bbdir, incfile)
+                    if abs_incfile:
+                        bbinc.append(abs_incfile)
+                        get_inc(abs_incfile)
+                except AttributeError:
+                    pass
+    get_inc(bbfile)
+    bbinc = list(set(bbinc))
+    for bbincfile in bbinc:
+        shutil.copy(bbincfile, bb_inc)
+
+    return script_logs
+
+def get_logs(d):
+    """
+    create a directory "script-logs" in ${WORKDIR}
+    """
+    work_dir = d.getVar('WORKDIR', True)
+    target_sys = d.getVar('TARGET_SYS', True)
+    pf = d.getVar('PF', True)
+    licenses = get_licenses(d)
+    script_logs = os.path.join(work_dir, 'script-logs/'+ target_sys + '/' + licenses + '/' + pf + '/script-logs')
+
+    try:
+        bb.utils.mkdirhier(os.path.join(script_logs, 'temp'))
+        oe.path.copytree(os.path.join(work_dir, 'temp'), os.path.join(script_logs, 'temp'))
+    except (IOError, AttributeError):
+        pass
+    return script_logs
+
+def get_series(d):
+    """
+    copy patches and the series file to a dedicated directory in
+    ${WORKDIR}, which will be archived to a tarball
+    """
+    import shutil
+
+    src_patches = []
+    pf = d.getVar('PF', True)
+    work_dir = d.getVar('WORKDIR', True)
+    s = d.getVar('S', True)
+    dest = os.path.join(work_dir, pf + '-series')
+    shutil.rmtree(dest, ignore_errors=True)
+    bb.utils.mkdirhier(dest)
+
+    src_uri = d.getVar('SRC_URI', True).split()
+    fetch = bb.fetch2.Fetch(src_uri, d)
+    locals = (fetch.localpath(url) for url in fetch.urls)
+    for local in locals:
+        src_patches.append(local)
+    if not cmp(work_dir, s):
+        tmp_list = src_patches
+    else:
+        tmp_list = src_patches[1:]
+
+    for patch in tmp_list:
+        try:
+            shutil.copy(patch, dest)
+        except IOError:
+            if os.path.isdir(patch):
+                bb.utils.mkdirhier(os.path.join(dest, patch))
+                oe.path.copytree(patch, os.path.join(dest, patch))
+    return dest
+
+def get_applying_patches(d):
+    """
+    only copy the patches that actually apply into a dedicated directory,
+    which will be archived to a tarball
+    """
+    import shutil
+
+    pf = d.getVar('PF', True)
+    work_dir = d.getVar('WORKDIR', True)
+    dest = os.path.join(work_dir, pf + '-patches')
+    shutil.rmtree(dest, ignore_errors=True)
+    bb.utils.mkdirhier(dest)
+
+    patches = src_patches(d)
+    for patch in patches:
+        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+        if local:
+            shutil.copy(local, dest)
+    return dest
+
+def not_tarball(d):
+    """
+    packages whose paths include the key words 'work-shared', 'native' or 'packagegroup-' are skipped
+    """
+    workdir = d.getVar('WORKDIR', True)
+    s = d.getVar('S', True)
+    if 'work-shared' in s or 'packagegroup-' in workdir or 'native' in workdir:
+        return True
+    else:
+        return False
+
+def get_source_from_downloads(d, stage_name):
+    """
+    copy the tarball of ${P} to ${WORKDIR} when that tarball exists in ${DL_DIR}
+    """
+    if stage_name in 'patched' 'configured':
+        return
+    pf = d.getVar('PF', True)
+    dl_dir = d.getVar('DL_DIR', True)
+    try:
+        source = os.path.join(dl_dir, os.path.basename(d.getVar('SRC_URI', True).split()[0]))
+        if os.path.exists(source) and not os.path.isdir(source):
+            return source
+    except (IndexError, OSError):
+        pass
+    return ''
+
+def do_tarball(workdir, srcdir, tarname):
+    """
+    tar "srcdir" under "workdir" to "tarname"
+    """
+    import tarfile
+
+    sav_dir = os.getcwd()
+    os.chdir(workdir)
+    if (len(os.listdir(srcdir))) != 0:
+        tar = tarfile.open(tarname, "w:gz")
+        tar.add(srcdir)
+        tar.close()
+    else:
+        tarname = ''
+    os.chdir(sav_dir)
+    return tarname
+
+def archive_sources_from_directory(d, stage_name):
+    """
+    archive the source tree to a tarball when no tarball of ${P}
+    exists in ${DL_DIR}
+    """
+
+    s = d.getVar('S', True)
+    work_dir = d.getVar('WORKDIR', True)
+    PF = d.getVar('PF', True)
+    tarname = PF + '-' + stage_name + ".tar.gz"
+
+    if os.path.exists(s) and work_dir in s:
+        try:
+            source_dir = os.path.join(work_dir, [ i for i in s.replace(work_dir, '').split('/') if i][0])
+        except IndexError:
+            if not cmp(s, work_dir):
+                return ''
+    else:
+        return ''
+    source = os.path.basename(source_dir)
+    return do_tarball(work_dir, source, tarname)
+
+def archive_sources(d, stage_name):
+    """
+    copy the tarball from ${DL_DIR} to ${WORKDIR} if one exists, otherwise
+    archive the source tree in ${WORKDIR} when ${P} is a directory
+    """
+    import shutil
+
+    work_dir = d.getVar('WORKDIR', True)
+    file = get_source_from_downloads(d, stage_name)
+    if file:
+        shutil.copy(file, work_dir)
+        file = os.path.basename(file)
+    else:
+        file = archive_sources_from_directory(d, stage_name)
+    return file
+
+def archive_patches(d, patchdir, series):
+    """
+    archive patches to a tarball, and also include the series file if 'series' is 'all'
+    """
+    import shutil
+
+    s = d.getVar('S', True)
+    work_dir = d.getVar('WORKDIR', True)
+    patch_dir = os.path.basename(patchdir)
+    tarname = patch_dir + ".tar.gz"
+    if series == 'all' and os.path.exists(os.path.join(s, 'patches/series')):
+        shutil.copy(os.path.join(s, 'patches/series'), patchdir)
+    tarname = do_tarball(work_dir, patch_dir, tarname)
+    shutil.rmtree(patchdir, ignore_errors=True)
+    return tarname
+
+def select_archive_patches(d, option):
+    """
+    select whether to archive all patches, including non-applying ones
+    and the series file, or only the applying patches
+    """
+    if option == "all":
+        patchdir = get_series(d)
+    elif option == "applying":
+        patchdir = get_applying_patches(d)
+    try:
+        os.rmdir(patchdir)
+    except OSError:
+        tarpatch = archive_patches(d, patchdir, option)
+        return tarpatch
+    return
+
+def archive_logs(d, logdir, bbinc=False):
+    """
+    archive the logs in temp to a tarball, plus the .bb and .inc files if bbinc is True
+    """
+    import shutil
+
+    pf = d.getVar('PF', True)
+    work_dir = d.getVar('WORKDIR', True)
+    log_dir = os.path.basename(logdir)
+    tarname = pf + '-' + log_dir + ".tar.gz"
+    archive_dir = os.path.join( logdir, '..' )
+    tarname = do_tarball(archive_dir, log_dir, tarname)
+    if bbinc:
+        shutil.rmtree(logdir, ignore_errors=True)
+    return tarname
+
+def get_licenses(d):
+    """get the licenses for the running .bb file"""
+    import oe.license
+
+    licenses_type = d.getVar('LICENSE', True) or ""
+    lics = oe.license.is_included(licenses_type)[1:][0]
+    lice = ''
+    for lic in lics:
+        licens = d.getVarFlag('SPDXLICENSEMAP', lic)
+        if licens != None:
+            lice += licens
+        else:
+            lice += lic
+    return lice
+
+
+def move_tarball_deploy(d, tarball_list):
+    """move tarballs to ${DEPLOY_DIR}/sources"""
+    import shutil
+
+    if not tarball_list:
+        return
+    target_sys = d.getVar('TARGET_SYS', True)
+    pf = d.getVar('PF', True)
+    licenses = get_licenses(d)
+    work_dir = d.getVar('WORKDIR', True)
+    tar_sources = d.getVar('DEPLOY_DIR', True) + '/sources/' + target_sys + '/' + licenses + '/' + pf
+    if not os.path.exists(tar_sources):
+        bb.utils.mkdirhier(tar_sources)
+    for source in tarball_list:
+        if source:
+            if os.path.exists(os.path.join(tar_sources, source)):
+                os.remove(os.path.join(tar_sources, source))
+            shutil.move(os.path.join(work_dir, source), tar_sources)
+
+def check_archiving_type(d):
+    """check the type for the archiving package ('tar' or 'srpm')"""
+    if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) not in d.getVar('ARCHIVE_TYPE', True).split():
+        bb.fatal("SOURCE_ARCHIVE_PACKAGE_TYPE must be 'tar' or 'srpm', no other types")
+
+def store_package(d, package_name):
+    """
+    store tarball names in the file "tar-package"
+    """
+    f = open(os.path.join(d.getVar('WORKDIR', True), 'tar-package'), 'a')
+    f.write(package_name + ' ')
+    f.close()
+
+def get_package(d):
+    """
+    get the tarball names from "tar-package"
+    """
+    work_dir = d.getVar('WORKDIR', True)
+    tarlist = os.path.join(work_dir, 'tar-package')
+    if os.path.exists(tarlist):
+        f = open(tarlist, 'r')
+        line = f.readline().rstrip('\n').split()
+        f.close()
+        return line
+    return []
+
+
+def archive_sources_patches(d, stage_name):
+    """
+    archive sources and patches to a tarball. stage_name is inserted
+    into the tarball name after ${PF}, for example
+    zlib-1.4.6-prepatch.tar.gz
+    """
+    import shutil
+
+    check_archiving_type(d)
+
+    source_tar_name = archive_sources(d, stage_name)
+    if stage_name == "prepatch":
+        if d.getVar('PATCHES_ARCHIVE_WITH_SERIES', True) == 'yes':
+            patch_tar_name = select_archive_patches(d, "all")
+        elif d.getVar('PATCHES_ARCHIVE_WITH_SERIES', True) == 'no':
+            patch_tar_name = select_archive_patches(d, "applying")
+        else:
+            bb.fatal("Please set 'PATCHES_ARCHIVE_WITH_SERIES' to 'yes' or 'no'")
+    else:
+        patch_tar_name = ''
+
+    if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) != 'srpm':
+        move_tarball_deploy(d, [source_tar_name, patch_tar_name])
+    else:
+        tarlist = os.path.join(d.getVar('WORKDIR', True), 'tar-package')
+        if os.path.exists(tarlist):
+            os.remove(tarlist)
+        for package in os.path.basename(source_tar_name), patch_tar_name:
+            if package:
+                store_package(d, str(package) + ' ')
+
+def archive_scripts_logs(d):
+    """
+    archive scripts and logs. scripts include the .bb and .inc files and
+    logs include the contents of "temp".
+    """
+    import shutil
+
+    work_dir = d.getVar('WORKDIR', True)
+    temp_dir = os.path.join(work_dir, 'temp')
+    source_archive_log_with_scripts = d.getVar('SOURCE_ARCHIVE_LOG_WITH_SCRIPTS', True)
+    if source_archive_log_with_scripts == 'logs_with_scripts':
+        logdir = get_logs(d)
+        logdir = get_bb_inc(d)
+    elif source_archive_log_with_scripts == 'logs':
+        logdir = get_logs(d)
+    else:
+        return
+
+    tarlog = archive_logs(d, logdir, True)
+
+    if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) == 'srpm':
+        store_package(d, tarlog)
+
+def dumpdata(d):
+    """
+    dump the environment, including all kinds of variables and functions
+    used when running a task, to "${P}-${PR}.showdata.dump"
+    """
+
+    workdir = bb.data.getVar('WORKDIR', d, 1)
+    distro = bb.data.getVar('DISTRO', d, 1)
+    s = d.getVar('S', True)
+    pf = d.getVar('PF', True)
+    target_sys = d.getVar('TARGET_SYS', True)
+    licenses = get_licenses(d)
+    dumpdir = os.path.join(workdir, 'diffgz-envdata/'+ target_sys + '/' + licenses + '/' + pf )
+    if not os.path.exists(dumpdir):
+        bb.utils.mkdirhier(dumpdir)
+
+    dumpfile = os.path.join(dumpdir, bb.data.expand("${P}-${PR}.showdata.dump", d))
+
+    bb.note("Dumping metadata into '%s'" % dumpfile)
+    f = open(dumpfile, "w")
+    # emit variables and shell functions
+    bb.data.emit_env(f, d, True)
+    # emit the metadata which isn't valid shell
+    for e in d.keys():
+        if bb.data.getVarFlag(e, 'python', d):
+            f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
+    f.close()
+
+def create_diff_gz(d):
+    """
+    create ${P}-${PR}.diff.gz in ${DEPLOY_DIR_SRC}, mapping all content
+    in ${S}, including patches, to xxx.diff.gz
+    """
+    import shutil
+    import subprocess
+
+    work_dir = d.getVar('WORKDIR', True)
+    exclude_from = d.getVar('ARCHIVE_EXCLUDE_FROM', True).split()
+    pf = d.getVar('PF', True)
+    licenses = get_licenses(d)
+    target_sys = d.getVar('TARGET_SYS', True)
+    diff_dir = os.path.join(work_dir, 'diffgz-envdata/'+ target_sys + '/' + licenses + '/' + pf )
+    diff_file = os.path.join(diff_dir, bb.data.expand("${P}-${PR}.diff.gz",d))
+
+    f = open(os.path.join(work_dir,'temp/exclude-from-file'), 'a')
+    for i in exclude_from:
+        f.write(i)
+        f.write("\n")
+    f.close()
+
+    s = d.getVar('S', True)
+    distro = d.getVar('DISTRO',True) or ""
+    dest = s + '/' + distro + '/files'
+    if not os.path.exists(dest):
+        bb.utils.mkdirhier(dest)
+    for i in os.listdir(os.getcwd()):
+        if os.path.isfile(i):
+            try:
+                shutil.copy(i, dest)
+            except IOError:
+                subprocess.call('fakeroot cp -rf ' + i + " " + dest, shell=True)
+
+    bb.note("Creating .diff.gz in ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz")
+    cmd = "LC_ALL=C TZ=UTC0 diff --exclude-from=" + work_dir + "/temp/exclude-from-file -Naur " + s + '.org' + ' ' + s + " | gzip -c > " + diff_file
+    d.setVar('DIFF', cmd + "\n")
+    d.setVarFlag('DIFF', 'func', '1')
+    bb.build.exec_func('DIFF', d)
+    shutil.rmtree(s + '.org', ignore_errors=True)
+
+# This function runs when the user wants a tarball of the sources and
+# patches after do_unpack
+python do_archive_original_sources_patches() {
+    archive_sources_patches(d, 'prepatch')
+}
+
+# This function runs when the user wants a tarball of the patched
+# sources after do_patch
+python do_archive_patched_sources() {
+    archive_sources_patches(d, 'patched')
+}
+
+# This function runs when the user wants a tarball of the configured
+# sources after do_configure
+python do_archive_configured_sources() {
+    archive_sources_patches(d, 'configured')
+}
+
+# This function runs when the user wants a tarball of the logs, or of
+# both the logs and the scripts (.bb and .inc files)
+python do_archive_scripts_logs() {
+    archive_scripts_logs(d)
+}
+
+# This function runs when the user wants to know which variables and
+# functions a running task uses, and also produces a diff file covering
+# all content a package should include.
+python do_dumpdata_create_diff_gz() {
+    dumpdata(d)
+    create_diff_gz(d)
+}
+
+# This function prepares for archiving "linux-yocto", because that
+# recipe creates its source directory ${S} before do_patch instead of
+# after do_unpack. This is special handling for archiving linux-yocto only.
+python do_archive_linux_yocto() {
+    s = d.getVar('S', True)
+    if 'linux-yocto' in s:
+        source_tar_name = archive_sources(d, '')
+        if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) != 'srpm':
+            move_tarball_deploy(d, [source_tar_name, ''])
+}
+do_kernel_checkout[postfuncs] += "do_archive_linux_yocto "
+
+# remove the tarball list for sources, patches and logs after creating the srpm.
+python do_delete_tarlist() {
+    work_dir = d.getVar('WORKDIR', True)
+    tarlist = os.path.join(work_dir, 'tar-package')
+    if os.path.exists(tarlist):
+        os.remove(tarlist)
+}
+do_delete_tarlist[deptask] = "do_archive_scripts_logs"
+do_package_write_rpm[postfuncs] += "do_delete_tarlist "
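
Taken together, the variables defined at the top of this class let a build
restrict archiving to copyleft-licensed target recipes and choose the
output format. A hedged local.conf sketch using only variables defined
above (the values are illustrative):

    # 'tar' or 'srpm'; anything else trips check_archiving_type()
    SOURCE_ARCHIVE_PACKAGE_TYPE = "tar"

    # Make tar_filter() skip recipes whose LICENSE matches no include glob
    FILTER = "yes"
    COPYLEFT_LICENSE_INCLUDE = "GPL* LGPL*"
    COPYLEFT_LICENSE_EXCLUDE = "CLOSED Proprietary"
    COPYLEFT_RECIPE_TYPES = "target"
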
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
new file mode 100644
index 0000000000..883eb06e26
--- /dev/null
+++ b/meta/classes/autotools.bbclass
@@ -0,0 +1,248 @@
+def autotools_dep_prepend(d):
+    if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
+        return ''
+
+    pn = d.getVar('PN', True)
+    deps = ''
+
+    if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
+        return deps
+    deps += 'autoconf-native automake-native '
+
+    if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
+        deps += 'libtool-native '
+        if not bb.data.inherits_class('native', d) \
+                and not bb.data.inherits_class('nativesdk', d) \
+                and not bb.data.inherits_class('cross', d) \
+                and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
+            deps += 'libtool-cross '
+
+    return deps + 'gnu-config-native '
+
+EXTRA_OEMAKE = ""
+
+DEPENDS_prepend = "${@autotools_dep_prepend(d)}"
+
+inherit siteinfo
+
+# Space separated list of shell scripts with variables defined to supply test
+# results for autoconf tests we cannot run at build time.
+export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+
+acpaths = "default"
+EXTRA_AUTORECONF = "--exclude=autopoint"
+
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
+
+# When building tools for use at build-time it's recommended for the build
+# system to use these variables when cross-compiling.
+# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
+export CPP_FOR_BUILD = "${BUILD_CPP}"
+export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
+
+export CC_FOR_BUILD = "${BUILD_CC}"
+export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
+
+export CXX_FOR_BUILD = "${BUILD_CXX}"
+export CXXFLAGS_FOR_BUILD = "${BUILD_CXXFLAGS}"
+
+export LD_FOR_BUILD = "${BUILD_LD}"
+export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
+
+def autotools_set_crosscompiling(d):
+    if not bb.data.inherits_class('native', d):
+        return " cross_compiling=yes"
+    return ""
+
+def append_libtool_sysroot(d):
+    # Only supply libtool sysroot option for non-native packages
+    if not bb.data.inherits_class('native', d):
+        return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
+    return ""
+
+# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
+
+CONFIGUREOPTS = " --build=${BUILD_SYS} \
+                  --host=${HOST_SYS} \
+                  --target=${TARGET_SYS} \
+                  --prefix=${prefix} \
+                  --exec_prefix=${exec_prefix} \
+                  --bindir=${bindir} \
+                  --sbindir=${sbindir} \
+                  --libexecdir=${libexecdir} \
+                  --datadir=${datadir} \
+                  --sysconfdir=${sysconfdir} \
+                  --sharedstatedir=${sharedstatedir} \
+                  --localstatedir=${localstatedir} \
+                  --libdir=${libdir} \
+                  --includedir=${includedir} \
+                  --oldincludedir=${oldincludedir} \
+                  --infodir=${infodir} \
+                  --mandir=${mandir} \
+                  --disable-silent-rules \
+                  ${CONFIGUREOPT_DEPTRACK} \
+                  ${@append_libtool_sysroot(d)}"
+CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking"
+
+
+oe_runconf () {
+    cfgscript="${S}/configure"
+    if [ -x "$cfgscript" ] ; then
+        bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
+        set +e
+        ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
+        if [ "$?" != "0" ]; then
+            echo "Configure failed. The contents of all config.log files follow, to aid debugging."
+            find ${S} -name config.log -print -exec cat {} \;
+            bbfatal "oe_runconf failed"
+        fi
+        set -e
+    else
+        bbfatal "no configure script found at $cfgscript"
+    fi
+}
+
+AUTOTOOLS_AUXDIR ?= "${S}"
+
+CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
+
+autotools_preconfigure() {
+    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
+        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
+            if [ "${S}" != "${B}" ]; then
+                echo "Previously configured separate build directory detected, cleaning ${B}"
+                rm -rf ${B}
+                mkdir ${B}
+            else
+                # At least remove the .la files since automake won't automatically
+                # regenerate them even if CFLAGS/LDFLAGS are different
+                cd ${S}; find ${S} -name \*.la -delete
+            fi
+        fi
+    fi
+}
+
+autotools_postconfigure(){
+    if [ -n "${CONFIGURESTAMPFILE}" ]; then
+        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+    fi
+}
+
+EXTRACONFFUNCS ??= ""
+
+do_configure[prefuncs] += "autotools_preconfigure ${EXTRACONFFUNCS}"
+do_configure[postfuncs] += "autotools_postconfigure"
+
+ACLOCALDIR = "${B}/aclocal-copy"
+
+autotools_copy_aclocal () {
+    # Remove any previous copy of the m4 macros
+    rm -rf ${ACLOCALDIR}/
+
+    # The aclocal directory could get modified by other processes
+    # uninstalling data from the sysroot. See Yocto #861 for details.
+    # We avoid this by taking a copy here and then files cannot disappear.
+    # We copy native first, then target. This avoids certain races since cp-noerror
+    # won't overwrite existing files.
+    mkdir -p ${ACLOCALDIR}/
+    if [ -d ${STAGING_DATADIR_NATIVE}/aclocal ]; then
+        cp-noerror ${STAGING_DATADIR_NATIVE}/aclocal/ ${ACLOCALDIR}/
+    fi
+    if [ -d ${STAGING_DATADIR}/aclocal -a "${STAGING_DATADIR_NATIVE}/aclocal" != "${STAGING_DATADIR}/aclocal" ]; then
+        cp-noerror ${STAGING_DATADIR}/aclocal/ ${ACLOCALDIR}/
+    fi
+}
+
+autotools_do_configure() {
+    # WARNING: gross hack follows:
+    # An autotools built package generally needs these scripts, however only
+    # automake or libtoolize actually install the current versions of them.
+    # This is a problem in builds that do not use libtool or automake, in the case
+    # where we -need- the latest version of these scripts. e.g. running a build
+    # for a package whose autotools are old, on an x86_64 machine, which the old
+    # config.sub does not support. Work around this by installing them manually
+    # regardless.
+    ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
+        rm -f `dirname $ac`/configure
+    done )
+    if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
+        olddir=`pwd`
+        cd ${S}
+        autotools_copy_aclocal
+        ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
+        if [ x"${acpaths}" = xdefault ]; then
+            acpaths=
+            for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
+                grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
+                acpaths="$acpaths -I $i"
+            done
+        else
+            acpaths="${acpaths}"
+        fi
+        AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
+        automake --version
+        echo "AUTOV is $AUTOV"
+        if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
+            ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
+        fi
+        # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
+        # like it was auto-generated. Work around this by blowing it away
+        # by hand, unless the package specifically asked not to run aclocal.
+        if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
+            rm -f aclocal.m4
+        fi
+        if [ -e configure.in ]; then
+            CONFIGURE_AC=configure.in
+        else
+            CONFIGURE_AC=configure.ac
+        fi
+        if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+            if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
+                : do nothing -- we still have an old unmodified configure.ac
+            else
+                bbnote Executing glib-gettextize --force --copy
+                echo "no" | glib-gettextize --force --copy
+            fi
+        else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+            # We'd call gettextize here if it wasn't so broken...
+            cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
+            if [ -d ${S}/po/ ]; then
+                cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
+                if [ ! -e ${S}/po/remove-potcdate.sin ]; then
+                    cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
+                fi
+            fi
+            for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
+                for j in `find ${S} -name $i | grep -v aclocal-copy`; do
+                    rm $j
+                done
+            done
+        fi
+        fi
+        mkdir -p m4
+        if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+            bbnote Executing intltoolize --copy --force --automake
+            intltoolize --copy --force --automake
+        fi
+        bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+        ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed."
+        cd $olddir
+    fi
+    if [ -e ${S}/configure ]; then
+        oe_runconf
+    else
+        bbnote "nothing to configure"
+    fi
+}
+
+autotools_do_install() {
+    oe_runmake 'DESTDIR=${D}' install
+    # Info dir listing isn't interesting at this point so remove it if it exists.
+    if [ -e "${D}${infodir}/dir" ]; then
+        rm -f ${D}${infodir}/dir
+    fi
+}
+
+inherit siteconfig
+
+EXPORT_FUNCTIONS do_configure do_install
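
A hedged sketch of a recipe driving the class above (the name, URI and
configure switch are hypothetical): oe_runconf passes CONFIGUREOPTS plus
EXTRA_OECONF to ${S}/configure, so EXTRA_OECONF is the usual per-recipe hook:

    SUMMARY = "Example autotools-based package"
    LICENSE = "MIT"
    SRC_URI = "http://downloads.example.com/foo-${PV}.tar.gz"

    inherit autotools

    # Appended after CONFIGUREOPTS on the configure command line
    EXTRA_OECONF = "--disable-docs"
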
diff --git a/meta/classes/autotools_stage.bbclass b/meta/classes/autotools_stage.bbclass
new file mode 100644
index 0000000000..b3c41e4b4d
--- /dev/null
+++ b/meta/classes/autotools_stage.bbclass
@@ -0,0 +1,2 @@
+inherit autotools
+
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
new file mode 100644
index 0000000000..dfa580c583
--- /dev/null
+++ b/meta/classes/base.bbclass
@@ -0,0 +1,654 @@
+BB_DEFAULT_TASK ?= "build"
+CLASSOVERRIDE ?= "class-target"
+
+inherit patch
+inherit staging
+
+inherit mirrors
+inherit utils
+inherit utility-tasks
+inherit metadata_scm
+inherit logging
+
+OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
+OE_IMPORTS[type] = "list"
+
+def oe_import(d):
+    import sys
+
+    bbpath = d.getVar("BBPATH", True).split(":")
+    sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
+
+    def inject(name, value):
+        """Make a python object accessible from the metadata"""
+        if hasattr(bb.utils, "_context"):
+            bb.utils._context[name] = value
+        else:
+            __builtins__[name] = value
+
+    import oe.data
+    for toimport in oe.data.typed_value("OE_IMPORTS", d):
+        imported = __import__(toimport)
+        inject(toimport.split(".", 1)[0], imported)
+
+python oe_import_eh () {
+    oe_import(e.data)
+    e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
+}
+
+addhandler oe_import_eh
+oe_import_eh[eventmask] = "bb.event.ConfigParsed"
+
+def lsb_distro_identifier(d):
+    adjust = d.getVar('LSB_DISTRO_ADJUST', True)
+    adjust_func = None
+    if adjust:
+        try:
+            adjust_func = globals()[adjust]
+        except KeyError:
+            pass
+    return oe.lsb.distro_identifier(adjust_func)
+
+die() {
+    bbfatal "$*"
+}
+
+oe_runmake() {
+    bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
+    ${MAKE} ${EXTRA_OEMAKE} "$@" || die "oe_runmake failed"
+}
+
+
+def base_dep_prepend(d):
+    #
+    # Ideally this will check a flag so we will operate properly in
+    # the case where host == build == target, for now we don't work in
+    # that case though.
+    #
+
+    deps = ""
+    # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
+    # we need that built is the responsibility of the patch function / class, not
+    # the application.
+    if not d.getVar('INHIBIT_DEFAULT_DEPS'):
+        if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
+            deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
+    return deps
+
+BASEDEPENDS = "${@base_dep_prepend(d)}"
+
+DEPENDS_prepend = "${BASEDEPENDS} "
+
+FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
+# THISDIR only works properly with immediate expansion as it has to run
+# in the context of the location it is used in (:=)
+THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
+
+def extra_path_elements(d):
+    path = ""
+    elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
+    for e in elements:
+        path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
+    return path
+
+PATH_prepend = "${@extra_path_elements(d)}"
+
+addtask fetch
+do_fetch[dirs] = "${DL_DIR}"
+do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
+python base_do_fetch() {
+
+    src_uri = (d.getVar('SRC_URI', True) or "").split()
+    if len(src_uri) == 0:
+        return
+
+    localdata = bb.data.createCopy(d)
+    bb.data.update_data(localdata)
+
+    try:
+        fetcher = bb.fetch2.Fetch(src_uri, localdata)
+        fetcher.download()
+    except bb.fetch2.BBFetchException as e:
+        raise bb.build.FuncFailed(e)
+}
+
+addtask unpack after do_fetch
+do_unpack[dirs] = "${WORKDIR}"
+do_unpack[cleandirs] = "${S}/patches"
+python base_do_unpack() {
+    src_uri = (d.getVar('SRC_URI', True) or "").split()
+    if len(src_uri) == 0:
+        return
+
+    localdata = bb.data.createCopy(d)
+    bb.data.update_data(localdata)
+
+    rootdir = localdata.getVar('WORKDIR', True)
+
+    try:
+        fetcher = bb.fetch2.Fetch(src_uri, localdata)
+        fetcher.unpack(rootdir)
+    except bb.fetch2.BBFetchException as e:
+        raise bb.build.FuncFailed(e)
+}
+
+def pkgarch_mapping(d):
+    # Compatibility mappings of TUNE_PKGARCH (opt in)
+    if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
+        if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
+            d.setVar("TUNE_PKGARCH", "armv7a")
+
+def preferred_ml_updates(d):
+    # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
+    # we need to mirror these variables in the multilib case;
+    multilibs = d.getVar('MULTILIBS', True) or ""
+    if not multilibs:
+        return
+
+    prefixes = []
+    for ext in multilibs.split():
+        eext = ext.split(':')
+        if len(eext) > 1 and eext[0] == 'multilib':
+            prefixes.append(eext[1])
+
+    versions = []
+    providers = []
+    for v in d.keys():
+        if v.startswith("PREFERRED_VERSION_"):
+            versions.append(v)
+        if v.startswith("PREFERRED_PROVIDER_"):
+            providers.append(v)
+
+    for v in versions:
+        val = d.getVar(v, False)
+        pkg = v.replace("PREFERRED_VERSION_", "")
+        if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+            continue
+        if 'cross-canadian' in pkg:
+            for p in prefixes:
+                localdata = bb.data.createCopy(d)
+                override = ":virtclass-multilib-" + p
+                localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+                bb.data.update_data(localdata)
+                newname = localdata.expand(v)
+                if newname != v:
+                    newval = localdata.expand(val)
+                    d.setVar(newname, newval)
+            # Avoid future variable key expansion
+            vexp = d.expand(v)
+            if v != vexp and d.getVar(v, False):
+                d.renameVar(v, vexp)
+            continue
+        for p in prefixes:
+            newname = "PREFERRED_VERSION_" + p + "-" + pkg
+            if not d.getVar(newname, False):
+                d.setVar(newname, val)
+
+    for prov in providers:
+        val = d.getVar(prov, False)
+        pkg = prov.replace("PREFERRED_PROVIDER_", "")
+        if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+            continue
+        if 'cross-canadian' in pkg:
+            for p in prefixes:
+                localdata = bb.data.createCopy(d)
+                override = ":virtclass-multilib-" + p
+                localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+                bb.data.update_data(localdata)
+                newname = localdata.expand(prov)
+                if newname != prov:
+                    newval = localdata.expand(val)
+                    d.setVar(newname, newval)
+            # Avoid future variable key expansion
+            provexp = d.expand(prov)
+            if prov != provexp and d.getVar(prov, False):
+                d.renameVar(prov, provexp)
+            continue
+        virt = ""
+        if pkg.startswith("virtual/"):
+            pkg = pkg.replace("virtual/", "")
+            virt = "virtual/"
+        for p in prefixes:
+            if pkg != "kernel":
+                val = p + "-" + val
+
+            # implement variable keys
+            localdata = bb.data.createCopy(d)
+            override = ":virtclass-multilib-" + p
+            localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+            bb.data.update_data(localdata)
+            newname = localdata.expand(prov)
+            if newname != prov and not d.getVar(newname, False):
+                d.setVar(newname, localdata.expand(val))
+
+            # implement alternative multilib name
+            newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
+            if not d.getVar(newname, False):
+                d.setVar(newname, val)
+        # Avoid future variable key expansion
+        provexp = d.expand(prov)
+        if prov != provexp and d.getVar(prov, False):
+            d.renameVar(prov, provexp)
+
+
+    mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+    extramp = []
+    for p in mp:
+        if p.endswith(("-native", "-crosssdk")) or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
+            continue
+        virt = ""
+        if p.startswith("virtual/"):
+            p = p.replace("virtual/", "")
+            virt = "virtual/"
+        for pref in prefixes:
+            extramp.append(virt + pref + "-" + p)
+    d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+
+
+def get_layers_branch_rev(d):
+    layers = (d.getVar("BBLAYERS", True) or "").split()
+    layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
+        base_get_metadata_git_branch(i, None).strip(), \
+        base_get_metadata_git_revision(i, None)) \
+            for i in layers]
+    i = len(layers_branch_rev)-1
+    p1 = layers_branch_rev[i].find("=")
+    s1 = layers_branch_rev[i][p1:]
+    while i > 0:
+        p2 = layers_branch_rev[i-1].find("=")
+        s2 = layers_branch_rev[i-1][p2:]
+        if s1 == s2:
+            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
+            i -= 1
+        else:
+            i -= 1
+            p1 = layers_branch_rev[i].find("=")
+            s1 = layers_branch_rev[i][p1:]
+    return layers_branch_rev
+
+
+BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
+BUILDCFG_FUNCS[type] = "list"
+
+def buildcfg_vars(d):
+    statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
+    for var in statusvars:
+        value = d.getVar(var, True)
+        if value is not None:
+            yield '%-17s = "%s"' % (var, value)
+
+def buildcfg_neededvars(d):
+    needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
+    pesteruser = []
+    for v in needed_vars:
+        val = d.getVar(v, True)
+        if not val or val == 'INVALID':
+            pesteruser.append(v)
+
+    if pesteruser:
+        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
+
+addhandler base_eventhandler
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted"
+python base_eventhandler() {
+    if isinstance(e, bb.event.ConfigParsed):
+        e.data.setVar('BB_VERSION', bb.__version__)
+        pkgarch_mapping(e.data)
+        preferred_ml_updates(e.data)
+        oe.utils.features_backfill("DISTRO_FEATURES", e.data)
+        oe.utils.features_backfill("MACHINE_FEATURES", e.data)
+
+    if isinstance(e, bb.event.BuildStarted):
+        localdata = bb.data.createCopy(e.data)
+        bb.data.update_data(localdata)
+        statuslines = []
+        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
+            g = globals()
+            if func not in g:
+                bb.warn("Build configuration function '%s' does not exist" % func)
+            else:
+                flines = g[func](localdata)
+                if flines:
+                    statuslines.extend(flines)
+
+        statusheader = e.data.getVar('BUILDCFG_HEADER', True)
+        bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
+}
+
+addtask configure after do_patch
+do_configure[dirs] = "${S} ${B}"
+do_configure[deptask] = "do_populate_sysroot"
+base_do_configure() {
+    :
+}
+
+addtask compile after do_configure
+do_compile[dirs] = "${S} ${B}"
+base_do_compile() {
+    if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
+        oe_runmake || die "make failed"
+    else
+        bbnote "nothing to compile"
+    fi
+}
+
+addtask install after do_compile
+do_install[dirs] = "${D} ${S} ${B}"
+# Remove and re-create ${D} so that it is guaranteed to be empty
+do_install[cleandirs] = "${D}"
+
+base_do_install() {
+    :
+}
+
+base_do_package() {
+    :
+}
+
+addtask build after do_populate_sysroot
+do_build = ""
+do_build[func] = "1"
+do_build[noexec] = "1"
+do_build[recrdeptask] += "do_deploy"
+do_build () {
+    :
+}
+
+def set_packagetriplet(d):
+    archs = []
+    tos = []
+    tvs = []
+
+    archs.append(d.getVar("PACKAGE_ARCHS", True).split())
+    tos.append(d.getVar("TARGET_OS", True))
+    tvs.append(d.getVar("TARGET_VENDOR", True))
+
+    def settriplet(d, varname, archs, tos, tvs):
+        triplets = []
+        for i in range(len(archs)):
+            for arch in archs[i]:
+                triplets.append(arch + tvs[i] + "-" + tos[i])
+        triplets.reverse()
+        d.setVar(varname, " ".join(triplets))
+
+    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
+
+    variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+    for item in variants.split():
+        localdata = bb.data.createCopy(d)
+        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
+        localdata.setVar("OVERRIDES", overrides)
+        bb.data.update_data(localdata)
+
+        archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
+        tos.append(localdata.getVar("TARGET_OS", True))
+        tvs.append(localdata.getVar("TARGET_VENDOR", True))
+
+    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
+
+python () {
+    import string, re
+
+    # Handle PACKAGECONFIG
+    #
+    # These take the form:
+    #
+    # PACKAGECONFIG ??= "<default options>"
+    # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
+    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
+    if pkgconfigflags:
+        pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
+        pn = d.getVar("PN", True)
+        mlprefix = d.getVar("MLPREFIX", True)
+
+        def expandFilter(appends, extension, prefix):
+            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
+            newappends = []
+            for a in appends:
+                if a.endswith("-native") or a.endswith("-cross"):
+                    newappends.append(a)
+                elif a.startswith("virtual/"):
+                    subs = a.split("/", 1)[1]
+                    newappends.append("virtual/" + prefix + subs + extension)
+                else:
+                    if a.startswith(prefix):
+                        newappends.append(a + extension)
+                    else:
+                        newappends.append(prefix + a + extension)
+            return newappends
+
+        def appendVar(varname, appends):
+            if not appends:
+                return
+            if varname.find("DEPENDS") != -1:
+                if pn.startswith("nativesdk-"):
+                    appends = expandFilter(appends, "", "nativesdk-")
+                if pn.endswith("-native"):
+                    appends = expandFilter(appends, "-native", "")
+                if mlprefix:
+                    appends = expandFilter(appends, "", mlprefix)
+            varname = d.expand(varname)
+            d.appendVar(varname, " " + " ".join(appends))
+
+        extradeps = []
+        extrardeps = []
+        extraconf = []
+        for flag, flagval in pkgconfigflags.items():
+            if flag == "defaultval":
+                continue
+            items = flagval.split(",")
+            num = len(items)
+            if num > 4:
+                bb.error("Only enable,disable,depend,rdepend can be specified!")
+
+            if flag in pkgconfig:
+                if num >= 3 and items[2]:
+                    extradeps.append(items[2])
+                if num >= 4 and items[3]:
+                    extrardeps.append(items[3])
+                if num >= 1 and items[0]:
+                    extraconf.append(items[0])
+            elif num >= 2 and items[1]:
+                extraconf.append(items[1])
+        appendVar('DEPENDS', extradeps)
+        appendVar('RDEPENDS_${PN}', extrardeps)
+        if bb.data.inherits_class('cmake', d):
+            appendVar('EXTRA_OECMAKE', extraconf)
+        else:
+            appendVar('EXTRA_OECONF', extraconf)
+
+    # If PRINC is set, try and increase the PR value by the amount specified
+    princ = d.getVar('PRINC', True)
+    if princ and princ != "0":
+        pr = d.getVar('PR', True)
+        pr_prefix = re.search("\D+", pr)
+        prval = re.search("\d+", pr)
+        if pr_prefix is None or prval is None:
+            bb.error("Unable to analyse format of PR variable: %s" % pr)
+        nval = int(prval.group(0)) + int(princ)
+        pr = pr_prefix.group(0) + str(nval) + pr[prval.end():]
+        d.setVar('PR', pr)
+
+    pn = d.getVar('PN', True)
+    license = d.getVar('LICENSE', True)
+    if license == "INVALID":
+        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
+
+    if bb.data.inherits_class('license', d):
+        unmatched_license_flag = check_license_flags(d)
+        if unmatched_license_flag:
+            bb.debug(1, "Skipping %s because it has a restricted license not"
+                " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
+            raise bb.parse.SkipPackage("because it has a restricted license not"
+                " whitelisted in LICENSE_FLAGS_WHITELIST")
+
+    # If we're building a target package we need to use fakeroot (pseudo)
+    # in order to capture permissions, owners, groups and special files
+    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+        d.setVarFlag('do_configure', 'umask', 022)
+        d.setVarFlag('do_compile', 'umask', 022)
+        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+        d.setVarFlag('do_install', 'fakeroot', 1)
+        d.setVarFlag('do_install', 'umask', 022)
+        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
494 d.setVarFlag('do_package', 'fakeroot', 1)
495 d.setVarFlag('do_package', 'umask', 022)
496 d.setVarFlag('do_package_setscene', 'fakeroot', 1)
497 d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
498 d.setVarFlag('do_devshell', 'fakeroot', 1)
499 d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
500 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
501 if not source_mirror_fetch:
502 need_host = d.getVar('COMPATIBLE_HOST', True)
503 if need_host:
504 import re
505 this_host = d.getVar('HOST_SYS', True)
506 if not re.match(need_host, this_host):
507 raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
508
509 need_machine = d.getVar('COMPATIBLE_MACHINE', True)
510 if need_machine:
511 import re
512 compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
513 for m in compat_machines:
514 if re.match(need_machine, m):
515 break
516 else:
517 raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
518
519
520 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
521
522 check_license = False if pn.startswith("nativesdk-") else True
523 for t in ["-native", "-cross", "-cross-initial", "-cross-intermediate",
524 "-crosssdk-intermediate", "-crosssdk", "-crosssdk-initial",
525 "-cross-canadian-" + d.getVar('TRANSLATED_TARGET_ARCH', True)]:
526 if pn.endswith(t):
527 check_license = False
528
529 if check_license and bad_licenses:
530 whitelist = []
531 for lic in bad_licenses:
532 for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]:
533 whitelist.extend((d.getVar(w + lic, True) or "").split())
534 spdx_license = return_spdx(d, lic)
535 if spdx_license:
536 whitelist.extend((d.getVar('HOSTTOOLS_WHITELIST_%s' % spdx_license, True) or "").split())
537 if not pn in whitelist:
538 recipe_license = d.getVar('LICENSE', True)
539 pkgs = d.getVar('PACKAGES', True).split()
540 skipped_pkgs = []
541 unskipped_pkgs = []
542 for pkg in pkgs:
543 if incompatible_license(d, bad_licenses, pkg):
544 skipped_pkgs.append(pkg)
545 else:
546 unskipped_pkgs.append(pkg)
547 all_skipped = skipped_pkgs and not unskipped_pkgs
548 if unskipped_pkgs:
549 for pkg in skipped_pkgs:
550 bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license)
551 d.setVar('LICENSE_EXCLUSION-' + pkg, 1)
552 for pkg in unskipped_pkgs:
553 bb.debug(1, "INCLUDING the package " + pkg)
554 elif all_skipped or incompatible_license(d, bad_licenses):
555 bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, recipe_license))
556 raise bb.parse.SkipPackage("incompatible with license %s" % recipe_license)
557
558 srcuri = d.getVar('SRC_URI', True)
559 # Svn packages should DEPEND on subversion-native
560 if "svn://" in srcuri:
561 d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
562
563 # Git packages should DEPEND on git-native
564 if "git://" in srcuri:
565 d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
566
567 # Mercurial packages should DEPEND on mercurial-native
568 elif "hg://" in srcuri:
569 d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
570
571 # OSC packages should DEPEND on osc-native
572 elif "osc://" in srcuri:
573 d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
574
575 # *.xz should depend on xz-native for unpacking
576 # Don't use endswith because of "*.patch.xz;patch=1"; this needs bb.fetch.decodeurl in future
577 if '.xz' in srcuri:
578 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
579
580 # unzip-native should already be staged before unpacking ZIP recipes
581 if ".zip" in srcuri:
582 d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
583
584 set_packagetriplet(d)
585
586 # 'multimachine' handling
587 mach_arch = d.getVar('MACHINE_ARCH', True)
588 pkg_arch = d.getVar('PACKAGE_ARCH', True)
589
590 if (pkg_arch == mach_arch):
591 # Already machine specific - nothing further to do
592 return
593
594 #
595 # We always try to scan SRC_URI for urls with machine overrides
596 # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
597 #
598 override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
599 if override != '0':
600 paths = []
601 fpaths = (d.getVar('FILESPATH', True) or '').split(':')
602 machine = d.getVar('MACHINE', True)
603 for p in fpaths:
604 if os.path.basename(p) == machine and os.path.isdir(p):
605 paths.append(p)
606
607 if len(paths) != 0:
608 for s in srcuri.split():
609 if not s.startswith("file://"):
610 continue
611 fetcher = bb.fetch2.Fetch([s], d)
612 local = fetcher.localpath(s)
613 for mp in paths:
614 if local.startswith(mp):
615 #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
616 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
617 return
618
619 packages = d.getVar('PACKAGES', True).split()
620 for pkg in packages:
621 pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)
622
623 # We could look for != PACKAGE_ARCH here but how to choose
624 # if multiple differences are present?
625 # Look through PACKAGE_ARCHS for the priority order?
626 if pkgarch and pkgarch == mach_arch:
627 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
628 bb.warn("Recipe %s is marked as only being architecture-specific but seems to have machine-specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
629}
630
631addtask cleansstate after do_clean
632python do_cleansstate() {
633 sstate_clean_cachefiles(d)
634}
635
636addtask cleanall after do_cleansstate
637python do_cleanall() {
638 src_uri = (d.getVar('SRC_URI', True) or "").split()
639 if len(src_uri) == 0:
640 return
641
642 localdata = bb.data.createCopy(d)
643 bb.data.update_data(localdata)
644
645 try:
646 fetcher = bb.fetch2.Fetch(src_uri, localdata)
647 fetcher.clean()
648 except bb.fetch2.BBFetchException, e:
649 raise bb.build.FuncFailed(e)
650}
651do_cleanall[nostamp] = "1"
652
653
654EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
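
As a usage sketch of the PACKAGECONFIG handling in the anonymous python above: a recipe declares one flag per optional feature, and the class maps the enabled flags onto DEPENDS, RDEPENDS_${PN} and EXTRA_OECONF (or EXTRA_OECMAKE for cmake recipes). The feature and dependency names here are hypothetical:

    PACKAGECONFIG ??= "gnutls"
    PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls,ca-certificates"
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"

With "gnutls" enabled this appends gnutls to DEPENDS, ca-certificates to RDEPENDS_${PN} and --with-gnutls to EXTRA_OECONF; since "manpages" is not enabled, --disable-man is appended instead.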
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
new file mode 100644
index 0000000000..a52b75be5c
--- /dev/null
+++ b/meta/classes/bin_package.bbclass
@@ -0,0 +1,36 @@
1#
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4#
5# Common variables and tasks for binary package recipes.
6# Basic principle:
7# * The files have been unpacked to ${S} by base.bbclass
8# * Skip do_configure and do_compile
9# * Use do_install to install the files to ${D}
10#
11# Note:
12# The "subdir" parameter in the SRC_URI is useful when the input package
13# is rpm, ipk, deb and so on, for example:
14#
15# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
16#
17# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
18# they would be in ${WORKDIR}.
19#
20
21# Skip the unwanted steps
22do_configure[noexec] = "1"
23do_compile[noexec] = "1"
24
25# Install the files to ${D}
26bin_package_do_install () {
27 # Do it carefully
28 [ -d "${S}" ] || exit 1
29 cd ${S} || exit 1
30 tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
31 | tar --no-same-owner -xpf - -C ${D}
32}
33
34FILES_${PN} = "/"
35
36EXPORT_FUNCTIONS do_install
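
A minimal recipe using this class might look as follows (the name and URI are hypothetical):

    SUMMARY = "Prebuilt foo application"
    LICENSE = "CLOSED"
    SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
    S = "${WORKDIR}/foo-1.0"
    inherit bin_package

The subdir parameter keeps the extracted payload under ${S} as the class expects, and the exported do_install then copies it verbatim into ${D}.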
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
new file mode 100644
index 0000000000..4c42602aff
--- /dev/null
+++ b/meta/classes/binconfig.bbclass
@@ -0,0 +1,59 @@
1FILES_${PN}-dev += "${bindir}/*-config"
2
3# The namespaces can clash here, hence the two-step replace
4def get_binconfig_mangle(d):
5 s = "-e ''"
6 if not bb.data.inherits_class('native', d):
7 optional_quote = r"\(\"\?\)"
8 s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
9 s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
10 s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
11 s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
12 s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
13 s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
14 s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
15 s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
16 s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
17 s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
18 s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
19 s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
20 s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
21 s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
22 if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
23 s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
24
25 return s
26
27BINCONFIG_GLOB ?= "*-config"
28
29PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
30
31binconfig_package_preprocess () {
32 for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
33 sed -i \
34 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
35 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
36 -e 's:${STAGING_DATADIR}:${datadir}:' \
37 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
38 $config
39 done
40 for lafile in `find ${PKGD} -name "*.la"` ; do
41 sed -i \
42 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
43 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
44 -e 's:${STAGING_DATADIR}:${datadir}:' \
45 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
46 $lafile
47 done
48}
49
50SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
51
52binconfig_sysroot_preprocess () {
53 for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do
54 configname=`basename $config`
55 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
56 cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
57 chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
58 done
59}
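
To illustrate the two-step replace: in a hypothetical foo-config script, a line such as

    # before mangling (assuming libdir = /usr/lib):
    libs="-L/usr/lib -lfoo"
    # after binconfig_sysroot_preprocess, in ${bindir_crossscripts}
    # (${STAGING_LIBDIR} is expanded to the actual staging path):
    libs="-L${STAGING_LIBDIR} -lfoo"

is first rewritten to the intermediate -LOELIBDIR token and then to the staging path, so cross builds link against the staging area; binconfig_package_preprocess() later maps any staging paths in the packaged script back to the target paths.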
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
new file mode 100644
index 0000000000..a0141a82c0
--- /dev/null
+++ b/meta/classes/blacklist.bbclass
@@ -0,0 +1,45 @@
1# anonymous support class, originally from Angstrom
2#
3# To use the blacklist, a distribution should include this
4# class in the INHERIT_DISTRO
5#
6# No longer use ANGSTROM_BLACKLIST, instead use a table of
7# recipes in PNBLACKLIST
8#
9# Features:
10#
11# * To add a package to the blacklist, set:
12# PNBLACKLIST[pn] = "message"
13#
14
15# Cope with PNBLACKLIST flags for multilib case
16addhandler blacklist_multilib_eventhandler
17blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
18python blacklist_multilib_eventhandler() {
19 multilibs = e.data.getVar('MULTILIBS', True)
20 if not multilibs:
21 return
22
23 # this block has been copied from base.bbclass so keep it in sync
24 prefixes = []
25 for ext in multilibs.split():
26 eext = ext.split(':')
27 if len(eext) > 1 and eext[0] == 'multilib':
28 prefixes.append(eext[1])
29
30 blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
31 for pkg, reason in blacklists.items():
32 if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
33 continue
34 for p in prefixes:
35 newpkg = p + "-" + pkg
36 if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
37 e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
38}
39
40python () {
41 blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
42
43 if blacklist:
44 raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
45}
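
A distro configuration that inherits this class can then mask individual recipes; the recipe name and reason below are examples:

    INHERIT += "blacklist"
    PNBLACKLIST[foo] = "Fails to build with the current toolchain"

Parsing any blacklisted recipe then raises SkipPackage with the given reason, and the event handler above propagates the entry to multilib-prefixed variants automatically.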
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
new file mode 100644
index 0000000000..55357283ac
--- /dev/null
+++ b/meta/classes/boot-directdisk.bbclass
@@ -0,0 +1,168 @@
1# boot-directdisk.bbclass
2# (loosely based on bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
3#
4# Create an image which can be placed directly onto a hard disk using dd and then
5# booted.
6#
7# This uses syslinux. extlinux would have been nice but requires the ext2/3
8# partition to be mounted. grub needs to run itself as part of the install
9# process.
10#
11# The end result is a 512-byte boot sector populated with an MBR and partition table,
12# followed by an msdos FAT16 partition containing syslinux and a Linux kernel,
13# completed by the ext2/3 rootfs.
14#
15# We have to push the msdos partition size above 16MB so FAT16 is used, as parted
16# won't touch FAT12 partitions.
17
18# External variables needed
19
20# ${ROOTFS} - the rootfs image to incorporate
21
22do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
23 syslinux:do_populate_sysroot \
24 syslinux-native:do_populate_sysroot \
25 parted-native:do_populate_sysroot \
26 mtools-native:do_populate_sysroot "
27
28PACKAGES = " "
29EXCLUDE_FROM_WORLD = "1"
30
31BOOTDD_VOLUME_ID ?= "boot"
32BOOTDD_EXTRA_SPACE ?= "16384"
33
34EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
35EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "grub-efi", "", d)}"
36
37# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
38# contain "efi". This way legacy is supported by default if neither is
39# specified, maintaining the original behavior.
40def pcbios(d):
41 pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
42 if pcbios == "0":
43 pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
44 return pcbios
45
46def pcbios_class(d):
47 if d.getVar("PCBIOS", True) == "1":
48 return "syslinux"
49 return ""
50
51PCBIOS = "${@pcbios(d)}"
52PCBIOS_CLASS = "${@pcbios_class(d)}"
53
54inherit ${PCBIOS_CLASS}
55inherit ${EFI_CLASS}
56
57# Get the build_syslinux_cfg() function from the syslinux class
58
59AUTO_SYSLINUXCFG = "1"
60DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
61SYSLINUX_ROOT ?= "root=/dev/sda2"
62SYSLINUX_TIMEOUT ?= "10"
63
64boot_direct_populate() {
65 dest=$1
66 install -d $dest
67
68 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
69 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz
70
71 if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
72 install -m 0644 ${INITRD} $dest/initrd
73 fi
74
75}
76
77build_boot_dd() {
78 HDDDIR="${S}/hdd/boot"
79 HDDIMG="${S}/hdd.image"
80 IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
81
82 boot_direct_populate $HDDDIR
83
84 if [ "${PCBIOS}" = "1" ]; then
85 syslinux_hddimg_populate $HDDDIR
86 fi
87 if [ "${EFI}" = "1" ]; then
88 grubefi_hddimg_populate $HDDDIR
89 fi
90
91 BLOCKS=`du -bks $HDDDIR | cut -f 1`
92 BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
93
94 # Ensure total sectors is an integral number of sectors per
95 # track or mcopy will complain. Sectors are 512 bytes, and we
96 # generate images with 32 sectors per track. This calculation is
97 # done in blocks, thus the mod by 16 instead of 32.
98 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
99
100 mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
101 mcopy -i $HDDIMG -s $HDDDIR/* ::/
102
103 if [ "${PCBIOS}" = "1" ]; then
104 syslinux_hdddirect_install $HDDIMG
105 fi
106 chmod 644 $HDDIMG
107
108 ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
109 TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
110 END1=`expr $BLOCKS \* 1024`
111 END2=`expr $END1 + 512`
112 END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
113
114 echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
115 rm -rf $IMAGE
116 dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
117
118 parted $IMAGE mklabel msdos
119 parted $IMAGE mkpart primary fat16 0 ${END1}B
120 parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
121 parted $IMAGE set 1 boot on
122 parted $IMAGE print
123
124 awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
125 dd of=$IMAGE bs=1 seek=440 conv=notrunc
126
127 OFFSET=`expr $END2 / 512`
128 if [ "${PCBIOS}" = "1" ]; then
129 dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
130 fi
131 dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
132 dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
133
134 cd ${DEPLOY_DIR_IMAGE}
135 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
136 ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
137}
138
139python do_bootdirectdisk() {
140 validate_disk_signature(d)
141 if d.getVar("PCBIOS", True) == "1":
142 bb.build.exec_func('build_syslinux_cfg', d)
143 if d.getVar("EFI", True) == "1":
144 bb.build.exec_func('build_grub_cfg', d)
145 bb.build.exec_func('build_boot_dd', d)
146}
147
148def generate_disk_signature():
149 import uuid
150
151 signature = str(uuid.uuid4())[:8]
152
153 if signature != '00000000':
154 return signature
155 else:
156 return 'ffffffff'
157
158def validate_disk_signature(d):
159 import re
160
161 disk_signature = d.getVar("DISK_SIGNATURE", True)
162
163 if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
164 bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
165
166DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
167
168addtask bootdirectdisk before do_build
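
As a worked example of the partition arithmetic in build_boot_dd(), with hypothetical sizes (a 33792 KiB boot FAT after rounding and a 100000 KiB rootfs), a minimal Python restatement of the expr chain:

    BLOCKS = 33792                      # boot FAT size in 1 KiB blocks (multiple of 16)
    ROOTFSBLOCKS = 100000               # rootfs size in 1 KiB blocks
    END1 = BLOCKS * 1024                # 34603008: end of the FAT16 partition, in bytes
    END2 = END1 + 512                   # 34603520: the ext2/3 partition starts one sector later
    END3 = ROOTFSBLOCKS * 1024 + END1   # 137003008: end of the ext2/3 partition
    OFFSET = END2 // 512                # 67585: sector at which dd writes ${ROOTFS}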
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
new file mode 100644
index 0000000000..395085d0ab
--- /dev/null
+++ b/meta/classes/bootimg.bbclass
@@ -0,0 +1,235 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
2# Released under the MIT license (see packages/COPYING)
3
4# Creates a bootable image using syslinux, your kernel and an optional
5# initrd
6
7#
8# End result is two things:
9#
10# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
11# an initrd and a rootfs image. These can be written to hard disks directly and
12# also booted on USB flash disks (write them there with dd).
13#
14# 2. A CD .iso image
15
16# The boot process is that the initrd boots and processes which label was selected
17# in syslinux. Actions based on the label are then performed (e.g. installing to
18# a hard disk).
19
20# External variables (also used by syslinux.bbclass)
21# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
22# ${COMPRESSISO} - Transparently compress the ISO, reducing its size by ~40% if set to 1
23# ${NOISO} - skip building the ISO image if set to 1
24# ${NOHDD} - skip building the HDD image if set to 1
25# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
26
27do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
28 mtools-native:do_populate_sysroot \
29 cdrtools-native:do_populate_sysroot \
30 ${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}"
31
32PACKAGES = " "
33EXCLUDE_FROM_WORLD = "1"
34
35HDDDIR = "${S}/hddimg"
36ISODIR = "${S}/iso"
37EFIIMGDIR = "${S}/efi_img"
38COMPACT_ISODIR = "${S}/iso.z"
39COMPRESSISO ?= "0"
40
41BOOTIMG_VOLUME_ID ?= "boot"
42BOOTIMG_EXTRA_SPACE ?= "512"
43
44EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
45EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "grub-efi", "", d)}"
46
47# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
48# contain "efi". This way legacy is supported by default if neither is
49# specified, maintaining the original behavior.
50def pcbios(d):
51 pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
52 if pcbios == "0":
53 pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
54 return pcbios
55
56PCBIOS = "${@pcbios(d)}"
57
58# syslinux is required for the isohybrid command and boot catalog
59inherit syslinux
60inherit ${EFI_CLASS}
61
62populate() {
63 DEST=$1
64 install -d ${DEST}
65
66 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
67 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
68
69 if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
70 install -m 0644 ${INITRD} ${DEST}/initrd
71 fi
72
73 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
74 install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
75 fi
76
77}
78
79build_iso() {
80 # Only create an ISO if we have an INITRD and NOISO was not set
81 if [ -z "${INITRD}" ] || [ ! -s "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
82 bbnote "ISO image will not be created."
83 return
84 fi
85
86 populate ${ISODIR}
87
88 if [ "${PCBIOS}" = "1" ]; then
89 syslinux_iso_populate ${ISODIR}
90 fi
91 if [ "${EFI}" = "1" ]; then
92 grubefi_iso_populate ${ISODIR}
93 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
94 fi
95
96 # EFI only
97 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
98 # Work around a bug in isohybrid where it requires isolinux.bin
99 # in the boot catalog, even though it is not used
100 mkdir -p ${ISODIR}/${ISOLINUXDIR}
101 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
102 fi
103
104 if [ "${COMPRESSISO}" = "1" ] ; then
105 # create compact directory, compress iso
106 mkdir -p ${COMPACT_ISODIR}
107 mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
108
109 # move compact iso to iso, then remove compact directory
110 mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
111 rm -Rf ${COMPACT_ISODIR}
112 mkisofs_compress_opts="-R -z -D -l"
113 else
114 mkisofs_compress_opts="-r"
115 fi
116
117 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
118 # PCBIOS only media
119 mkisofs -V ${BOOTIMG_VOLUME_ID} \
120 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
121 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
122 $mkisofs_compress_opts \
123 ${MKISOFS_OPTIONS} ${ISODIR}
124 else
125 # EFI only OR EFI+PCBIOS
126 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
127 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
128 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
129 $mkisofs_compress_opts ${MKISOFS_OPTIONS} \
130 -eltorito-alt-boot -eltorito-platform efi \
131 -b efi.img -no-emul-boot \
132 ${ISODIR}
133 isohybrid_args="-u"
134 fi
135
136 isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
137
138 cd ${DEPLOY_DIR_IMAGE}
139 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
140 ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
141}
142
143build_fat_img() {
144 FATSOURCEDIR=$1
145 FATIMG=$2
146
147 # Calculate the size required for the final image including the
148 # data and filesystem overhead.
149 # Sectors: 512 bytes
150 # Blocks: 1024 bytes
151
152 # Determine the sector count just for the data
153 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
154
155 # Account for the filesystem overhead. This includes directory
156 # entries in the clusters as well as the FAT itself.
157 # Assumptions:
158 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
159 # padding will be minimal on those smaller images and not
160 # worth the logic here to calculate the smaller FAT sizes)
161 # < 16 entries per directory
162 # 8.3 filenames only
163
164 # 32 bytes per dir entry
165 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
166 # 32 bytes for every end-of-directory dir entry
167 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
168 # 4 bytes per FAT entry per sector of data
169 FAT_BYTES=$(expr $SECTORS \* 4)
170 # 4 bytes per FAT entry per end-of-cluster list
171 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
172
173 # Use a ceiling function to determine FS overhead in sectors
174 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
175 # There are two FATs on the image
176 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
177 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
178
179 # Determine the final size in blocks accounting for some padding
180 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
181
182 # Ensure total sectors is an integral number of sectors per
183 # track or mcopy will complain. Sectors are 512 bytes, and we
184 # generate images with 32 sectors per track. This calculation is
185 # done in blocks, thus the mod by 16 instead of 32.
186 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
187
188 # mkdosfs will sometimes use FAT16 when it is not appropriate,
189 # resulting in a boot failure from SYSLINUX. Use FAT32 for
190 # images larger than 512MB, otherwise let mkdosfs decide.
191 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
192 FATSIZE="-F 32"
193 fi
194
195 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} ${BLOCKS}
196 # Copy FATSOURCEDIR recursively into the image file directly
197 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
198}
199
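The overhead estimate above can be restated as a short Python sketch under the same assumptions (FAT32 sizing, 32-byte directory entries, two FATs); the input values in the usage line are hypothetical:

    def fat_image_blocks(data_kib, n_entries, n_dirs, extra_space=512):
        # Sectors are 512 bytes, blocks are 1024 bytes, as in build_fat_img()
        sectors = data_kib * 2
        dir_bytes = n_entries * 32 + n_dirs * 32       # dir entries plus end-of-dir entries
        fat_bytes = sectors * 4 + n_dirs * 4           # 4 bytes per FAT entry
        dir_sectors = (dir_bytes + 511) // 512         # ceiling division to whole sectors
        fat_sectors = ((fat_bytes + 511) // 512) * 2   # two FATs on the image
        sectors += dir_sectors + fat_sectors
        blocks = sectors // 2 + extra_space            # extra_space mirrors BOOTIMG_EXTRA_SPACE
        return blocks + (16 - blocks % 16)             # keep mcopy's 32-sectors-per-track happy

    fat_image_blocks(8192, 10, 1)                      # e.g. an 8 MiB payload with 10 files
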
200build_hddimg() {
201 # Create an HDD image
202 if [ "${NOHDD}" != "1" ] ; then
203 populate ${HDDDIR}
204
205 if [ "${PCBIOS}" = "1" ]; then
206 syslinux_hddimg_populate ${HDDDIR}
207 fi
208 if [ "${EFI}" = "1" ]; then
209 grubefi_hddimg_populate ${HDDDIR}
210 fi
211
212 build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
213
214 if [ "${PCBIOS}" = "1" ]; then
215 syslinux_hddimg_install
216 fi
217
218 chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
219
220 cd ${DEPLOY_DIR_IMAGE}
221 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
222 ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
223 fi
224}
225
226python do_bootimg() {
227 if d.getVar("PCBIOS", True) == "1":
228 bb.build.exec_func('build_syslinux_cfg', d)
229 if d.getVar("EFI", True) == "1":
230 bb.build.exec_func('build_grub_cfg', d)
231 bb.build.exec_func('build_hddimg', d)
232 bb.build.exec_func('build_iso', d)
233}
234
235addtask bootimg before do_build
diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass
new file mode 100644
index 0000000000..3fc8956428
--- /dev/null
+++ b/meta/classes/bugzilla.bbclass
@@ -0,0 +1,187 @@
1#
2# Small event handler to automatically open URLs and file
3# bug reports at a bugzilla of your choice.
4# It uses the XML-RPC interface, so you must have it enabled.
5#
6# Before using, you must define the BUGZILLA_USER and BUGZILLA_PASS credentials,
7# BUGZILLA_XMLRPC - URI of xmlrpc.cgi,
8# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in BTS for build bugs
9# BUGZILLA_VERSION - version against which to report new bugs
10#
11
12def bugzilla_find_bug_report(debug_file, server, args, bugname):
13 args['summary'] = bugname
14 bugs = server.Bug.search(args)
15 if len(bugs['bugs']) == 0:
16 print >> debug_file, "Bugs not found"
17 return (False,None)
18 else: # silently pick the first result
19 print >> debug_file, "Result of bug search is "
20 print >> debug_file, bugs
21 status = bugs['bugs'][0]['status']
22 id = bugs['bugs'][0]['id']
23 return (not status in ["CLOSED", "RESOLVED", "VERIFIED"],id)
24
25def bugzilla_file_bug(debug_file, server, args, name, text, version):
26 args['summary'] = name
27 args['comment'] = text
28 args['version'] = version
29 args['op_sys'] = 'Linux'
30 args['platform'] = 'Other'
31 args['severity'] = 'normal'
32 args['priority'] = 'Normal'
33 try:
34 return server.Bug.create(args)['id']
35 except Exception, e:
36 print >> debug_file, repr(e)
37 return None
38
39def bugzilla_reopen_bug(debug_file, server, args, bug_number):
40 args['ids'] = [bug_number]
41 args['status'] = "CONFIRMED"
42 try:
43 server.Bug.update(args)
44 return True
45 except Exception, e:
46 print >> debug_file, repr(e)
47 return False
48
49def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
50 args['ids'] = [bug_number]
51 args['file_name'] = file_name
52 args['summary'] = logdescription
53 args['content_type'] = "text/plain"
54 args['data'] = log
55 args['comment'] = text
56 try:
57 server.Bug.add_attachment(args)
58 return True
59 except Exception, e:
60 print >> debug_file, repr(e)
61 return False
62
63def bugzilla_add_comment(debug_file, server, args, bug_number, text):
64 args['id'] = bug_number
65 args['comment'] = text
66 try:
67 server.Bug.add_comment(args)
68 return True
69 except Exception, e:
70 print >> debug_file, repr(e)
71 return False
72
73addhandler bugzilla_eventhandler
74bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed"
75python bugzilla_eventhandler() {
76 import glob
77 import xmlrpclib, httplib, base64  # base64 is used by ProxiedTransport.send_request
78
79 class ProxiedTransport(xmlrpclib.Transport):
80 def __init__(self, proxy, use_datetime = 0):
81 xmlrpclib.Transport.__init__(self, use_datetime)
82 self.proxy = proxy
83 self.user = None
84 self.password = None
85
86 def set_user(self, user):
87 self.user = user
88
89 def set_password(self, password):
90 self.password = password
91
92 def make_connection(self, host):
93 self.realhost = host
94 return httplib.HTTP(self.proxy)
95
96 def send_request(self, connection, handler, request_body):
97 connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
98 if self.user != None:
99 if self.password != None:
100 auth = "%s:%s" % (self.user, self.password)
101 else:
102 auth = self.user
103 connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
104
105 event = e
106 data = e.data
107 name = bb.event.getName(event)
108 if name == "MsgNote":
109 # avoid recursion
110 return
111
112 if name == "TaskFailed":
113 xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
114 user = data.getVar("BUGZILLA_USER", True)
115 passw = data.getVar("BUGZILLA_PASS", True)
116 product = data.getVar("BUGZILLA_PRODUCT", True)
117 compon = data.getVar("BUGZILLA_COMPONENT", True)
118 version = data.getVar("BUGZILLA_VERSION", True)
119
120 proxy = data.getVar('http_proxy', True )
121 if (proxy):
122 import urllib2
123 s, u, p, hostport = urllib2._parse_proxy(proxy)
124 transport = ProxiedTransport(hostport)
125 else:
126 transport = None
127
128 server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
129 args = {
130 'Bugzilla_login': user,
131 'Bugzilla_password': passw,
132 'product': product,
133 'component': compon}
134
135 # evil hack to figure out what is going on
136 debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
137
138 file = None
139 bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
140 "pv" : data.getVar("PV", True),
141 }
142 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
143 text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
144 if len(log_file) != 0:
145 print >> debug_file, "Adding log file %s" % log_file[0]
146 file = open(log_file[0], 'r')
147 log = file.read()
148 file.close()
149 else:
150 print >> debug_file, "No log file found for the glob"
151 log = None
152
153 (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
154 print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
155
156 # The bug is present and still open, attach an error log
157 if not bug_number:
158 bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
159 if not bug_number:
160 print >> debug_file, "Couldn't acquire a new bug_number, filing a bug report failed"
161 else:
162 print >> debug_file, "The new bug_number: '%s'" % bug_number
163 elif not bug_open:
164 if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
165 print >> debug_file, "Failed to reopen the bug #%s" % bug_number
166 else:
167 print >> debug_file, "Reopened the bug #%s" % bug_number
168
169 if bug_number and log:
170 print >> debug_file, "The bug is known as '%s'" % bug_number
171 desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
172 if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
173 print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
174 else:
175 print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
176 else:
177 print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
178 if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
179 print >> debug_file, "Failed to add a comment with the build log for bug #%s" % bug_number
180 else:
181 print >> debug_file, "Added a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
182
183 # store bug number for oestats-client
184 if bug_number:
185 data.setVar('OESTATS_BUG_NUMBER', bug_number)
186}
187
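A minimal configuration enabling this handler might look like the following; all values are placeholders:

    INHERIT += "bugzilla"
    BUGZILLA_XMLRPC = "https://bugzilla.example.com/xmlrpc.cgi"
    BUGZILLA_USER = "autobuilder@example.com"
    BUGZILLA_PASS = "secret"
    BUGZILLA_PRODUCT = "MyDistro"
    BUGZILLA_COMPONENT = "Autobuilder"
    BUGZILLA_VERSION = "1.0"

Each failed task then either files a new <PN>-<PV>-autobuild bug, reopens a closed one, or adds the latest log to the existing open report.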
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
new file mode 100644
index 0000000000..3da03c8485
--- /dev/null
+++ b/meta/classes/buildhistory.bbclass
@@ -0,0 +1,628 @@
1#
2# Records history of build output in order to detect regressions
3#
4# Based in part on testlab.bbclass and packagehistory.bbclass
5#
6# Copyright (C) 2013 Intel Corporation
7# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
8#
9
10BUILDHISTORY_FEATURES ?= "image package sdk"
11BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
12BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
13BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
14BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}/${IMAGE_BASENAME}"
15BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
16BUILDHISTORY_COMMIT ?= "0"
17BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
18BUILDHISTORY_PUSH_REPO ?= ""
19
20# Must inherit package first before changing PACKAGEFUNCS
21inherit package
22PACKAGEFUNCS += "buildhistory_emit_pkghistory"
23
24# We don't want to force a rerun of do_package for everything
25# if the buildhistory_emit_pkghistory function or any of the
26# variables it refers to changes
27do_package[vardepsexclude] += "buildhistory_emit_pkghistory"
28
29#
30# Called during do_package to write out metadata about this package
31# for comparison when writing future packages
32#
33python buildhistory_emit_pkghistory() {
34 import re
35
36 if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
37 return 0
38
39 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
40
41 class RecipeInfo:
42 def __init__(self, name):
43 self.name = name
44 self.pe = "0"
45 self.pv = "0"
46 self.pr = "r0"
47 self.depends = ""
48 self.packages = ""
49 self.bbfile = ""
50 self.src_uri = ""
51 self.srcrev = ""
52 self.srcrev_autorev = ""
53
54
55 class PackageInfo:
56 def __init__(self, name):
57 self.name = name
58 self.pe = "0"
59 self.pv = "0"
60 self.pr = "r0"
61 # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them
62 self.pkg = ""
63 self.pkge = ""
64 self.pkgv = ""
65 self.pkgr = ""
66 self.size = 0
67 self.depends = ""
68 self.rprovides = ""
69 self.rdepends = ""
70 self.rrecommends = ""
71 self.rsuggests = ""
72 self.rreplaces = ""
73 self.rconflicts = ""
74 self.files = ""
75 self.filelist = ""
76 # Variables that need to be written to their own separate file
77 self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'])
78
79 # Should check PACKAGES here to see if anything removed
80
81 def getpkgvar(pkg, var):
82 val = bb.data.getVar('%s_%s' % (var, pkg), d, 1)
83 if val:
84 return val
85 val = bb.data.getVar('%s' % (var), d, 1)
86
87 return val
88
89 def readPackageInfo(pkg, histfile):
90 pkginfo = PackageInfo(pkg)
91 with open(histfile, "r") as f:
92 for line in f:
93 lns = line.split('=')
94 name = lns[0].strip()
95 value = lns[1].strip(" \t\r\n").strip('"')
96 if name == "PE":
97 pkginfo.pe = value
98 elif name == "PV":
99 pkginfo.pv = value
100 elif name == "PR":
101 pkginfo.pr = value
102 elif name == "PKG":
103 pkginfo.pkg = value
104 elif name == "PKGE":
105 pkginfo.pkge = value
106 elif name == "PKGV":
107 pkginfo.pkgv = value
108 elif name == "PKGR":
109 pkginfo.pkgr = value
110 elif name == "RPROVIDES":
111 pkginfo.rprovides = value
112 elif name == "RDEPENDS":
113 pkginfo.rdepends = value
114 elif name == "RRECOMMENDS":
115 pkginfo.rrecommends = value
116 elif name == "RSUGGESTS":
117 pkginfo.rsuggests = value
118 elif name == "RREPLACES":
119 pkginfo.rreplaces = value
120 elif name == "RCONFLICTS":
121 pkginfo.rconflicts = value
122 elif name == "PKGSIZE":
123 pkginfo.size = long(value)
124 elif name == "FILES":
125 pkginfo.files = value
126 elif name == "FILELIST":
127 pkginfo.filelist = value
128 # Apply defaults
129 if not pkginfo.pkg:
130 pkginfo.pkg = pkginfo.name
131 if not pkginfo.pkge:
132 pkginfo.pkge = pkginfo.pe
133 if not pkginfo.pkgv:
134 pkginfo.pkgv = pkginfo.pv
135 if not pkginfo.pkgr:
136 pkginfo.pkgr = pkginfo.pr
137 return pkginfo
138
139 def getlastpkgversion(pkg):
140 try:
141 histfile = os.path.join(pkghistdir, pkg, "latest")
142 return readPackageInfo(pkg, histfile)
143 except EnvironmentError:
144 return None
145
146 def sortpkglist(string):
147 pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0)
148 pkglist = [p.group(0) for p in pkgiter]
149 pkglist.sort()
150 return ' '.join(pkglist)
151
152 def sortlist(string):
153 items = string.split(' ')
154 items.sort()
155 return ' '.join(items)
156
157 pn = d.getVar('PN', True)
158 pe = d.getVar('PE', True) or "0"
159 pv = d.getVar('PV', True)
160 pr = d.getVar('PR', True)
161
162 bbfile = d.getVar('BB_FILENAME', True)
163 src_uri = d.getVar('SRC_URI', True)
164 srcrev = d.getVar('SRCREV', True)
165 srcrev_autorev = 'yes' if d.getVar('SRCREV', False) == 'AUTOINC' else 'no'
166
167 packages = squashspaces(d.getVar('PACKAGES', True))
168
169 packagelist = packages.split()
170 if not os.path.exists(pkghistdir):
171 bb.utils.mkdirhier(pkghistdir)
172 else:
173 # Remove files for packages that no longer exist
174 for item in os.listdir(pkghistdir):
175 if item != "latest" and item != "latest_srcrev":
176 if item not in packagelist:
177 subdir = os.path.join(pkghistdir, item)
178 for subfile in os.listdir(subdir):
179 os.unlink(os.path.join(subdir, subfile))
180 os.rmdir(subdir)
181
182 rcpinfo = RecipeInfo(pn)
183 rcpinfo.pe = pe
184 rcpinfo.pv = pv
185 rcpinfo.pr = pr
186 rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
187 rcpinfo.bbfile = bbfile
188 rcpinfo.src_uri = src_uri
189 rcpinfo.srcrev = srcrev
190 rcpinfo.srcrev_autorev = srcrev_autorev
191 rcpinfo.packages = packages
192 write_recipehistory(rcpinfo, d)
193
194 pkgdest = d.getVar('PKGDEST', True)
195 for pkg in packagelist:
196 pkge = getpkgvar(pkg, 'PKGE') or "0"
197 pkgv = getpkgvar(pkg, 'PKGV')
198 pkgr = getpkgvar(pkg, 'PKGR')
199 #
200 # Find out what the last version was
201 # Make sure the version did not decrease
202 #
203 lastversion = getlastpkgversion(pkg)
204 if lastversion:
205 last_pkge = lastversion.pkge
206 last_pkgv = lastversion.pkgv
207 last_pkgr = lastversion.pkgr
208 r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
209 if r < 0:
210 msg = "Package version for package %s went backwards, which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
211 package_qa_handle_error("version-going-backwards", msg, d)
212
213 pkginfo = PackageInfo(pkg)
214 # Apparently the version can be different on a per-package basis (see Python)
215 pkginfo.pe = getpkgvar(pkg, 'PE') or "0"
216 pkginfo.pv = getpkgvar(pkg, 'PV')
217 pkginfo.pr = getpkgvar(pkg, 'PR')
218 pkginfo.pkg = getpkgvar(pkg, 'PKG') or pkg
219 pkginfo.pkge = pkge
220 pkginfo.pkgv = pkgv
221 pkginfo.pkgr = pkgr
222 pkginfo.rprovides = sortpkglist(squashspaces(getpkgvar(pkg, 'RPROVIDES') or ""))
223 pkginfo.rdepends = sortpkglist(squashspaces(getpkgvar(pkg, 'RDEPENDS') or ""))
224 pkginfo.rrecommends = sortpkglist(squashspaces(getpkgvar(pkg, 'RRECOMMENDS') or ""))
225 pkginfo.rsuggests = sortpkglist(squashspaces(getpkgvar(pkg, 'RSUGGESTS') or ""))
226 pkginfo.rreplaces = sortpkglist(squashspaces(getpkgvar(pkg, 'RREPLACES') or ""))
227 pkginfo.rconflicts = sortpkglist(squashspaces(getpkgvar(pkg, 'RCONFLICTS') or ""))
228 pkginfo.files = squashspaces(getpkgvar(pkg, 'FILES') or "")
229 for filevar in pkginfo.filevars:
230 pkginfo.filevars[filevar] = getpkgvar(pkg, filevar)
231
232 # Gather information about packaged files
233 pkgdestpkg = os.path.join(pkgdest, pkg)
234 filelist = []
235 pkginfo.size = 0
236 for f in pkgfiles[pkg]:
237 relpth = os.path.relpath(f, pkgdestpkg)
238 fstat = os.lstat(f)
239 pkginfo.size += fstat.st_size
240 filelist.append(os.sep + relpth)
241 filelist.sort()
242 pkginfo.filelist = " ".join(filelist)
243
244 write_pkghistory(pkginfo, d)
245}
246
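The per-package latest file written by write_pkghistory() (and read back by readPackageInfo() on the next build) is a flat list of VAR = value lines; a hypothetical example for a package foo:

    PV = 1.2.3
    PR = r4
    RPROVIDES = foo
    RDEPENDS = libc6 (>= 2.17)
    RRECOMMENDS = foo-doc
    PKGSIZE = 40960
    FILES = /usr/bin/* /usr/lib/lib*.so.*
    FILELIST = /usr/bin/foo /usr/lib/libfoo.so.1

PE/PKG/PKGE/PKGV/PKGR lines are only emitted when they differ from their defaults, which is what lets readPackageInfo() apply the defaults on the way back in.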
247
248def write_recipehistory(rcpinfo, d):
249 bb.debug(2, "Writing recipe history")
250
251 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
252
253 infofile = os.path.join(pkghistdir, "latest")
254 with open(infofile, "w") as f:
255 if rcpinfo.pe != "0":
256 f.write("PE = %s\n" % rcpinfo.pe)
257 f.write("PV = %s\n" % rcpinfo.pv)
258 f.write("PR = %s\n" % rcpinfo.pr)
259 f.write("DEPENDS = %s\n" % rcpinfo.depends)
260 f.write("PACKAGES = %s\n" % rcpinfo.packages)
261
262
263def write_pkghistory(pkginfo, d):
264 bb.debug(2, "Writing package history for package %s" % pkginfo.name)
265
266 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
267
268 pkgpath = os.path.join(pkghistdir, pkginfo.name)
269 if not os.path.exists(pkgpath):
270 bb.utils.mkdirhier(pkgpath)
271
272 infofile = os.path.join(pkgpath, "latest")
273 with open(infofile, "w") as f:
274 if pkginfo.pe != "0":
275 f.write("PE = %s\n" % pkginfo.pe)
276 f.write("PV = %s\n" % pkginfo.pv)
277 f.write("PR = %s\n" % pkginfo.pr)
278
279 pkgvars = {}
280 pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else ''
281 pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else ''
282 pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else ''
283 pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else ''
284 for pkgvar in pkgvars:
285 val = pkgvars[pkgvar]
286 if val:
287 f.write("%s = %s\n" % (pkgvar, val))
288
289 f.write("RPROVIDES = %s\n" % pkginfo.rprovides)
290 f.write("RDEPENDS = %s\n" % pkginfo.rdepends)
291 f.write("RRECOMMENDS = %s\n" % pkginfo.rrecommends)
292 if pkginfo.rsuggests:
293 f.write("RSUGGESTS = %s\n" % pkginfo.rsuggests)
294 if pkginfo.rreplaces:
295 f.write("RREPLACES = %s\n" % pkginfo.rreplaces)
296 if pkginfo.rconflicts:
297 f.write("RCONFLICTS = %s\n" % pkginfo.rconflicts)
298 f.write("PKGSIZE = %d\n" % pkginfo.size)
299 f.write("FILES = %s\n" % pkginfo.files)
300 f.write("FILELIST = %s\n" % pkginfo.filelist)
301
302 for filevar in pkginfo.filevars:
303 filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
304 val = pkginfo.filevars[filevar]
305 if val:
306 with open(filevarpath, "w") as f:
307 f.write(val)
308 else:
309 if os.path.exists(filevarpath):
310 os.unlink(filevarpath)
311
312
313buildhistory_get_installed() {
314 mkdir -p $1
315
316 # Get list of installed packages
317 pkgcache="$1/installed-packages.tmp"
318 list_installed_packages file | sort > $pkgcache
319
320 cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
321 if [ -s $pkgcache ] ; then
322 cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
323 else
324 printf "" > $1/installed-packages.txt
325 fi
326
327 # Produce dependency graph
328 # First, quote each name to handle characters that cause issues for dot
329 rootfs_list_installed_depends | sed 's:\([^| ]*\):"\1":g' > $1/depends.tmp
330 # Change delimiter from pipe to -> and set style for recommend lines
331 sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
332 # Add a header, the sorted and de-duped contents, and a footer, then delete the temp file
333 printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
334 cat $1/depends.tmp | sort | uniq >> $1/depends.dot
335 echo "}" >> $1/depends.dot
336 rm $1/depends.tmp
337
338 # Produce installed package sizes list
339 printf "" > $1/installed-package-sizes.tmp
340 cat $pkgcache | while read pkg pkgfile pkgarch
341 do
342 for vendor in ${TARGET_VENDOR} ${MULTILIB_VENDORS} ; do
343 size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
344 if [ "$size" != "" ] ; then
345 echo "$size $pkg" >> $1/installed-package-sizes.tmp
346 fi
347 done
348 done
349 cat $1/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > $1/installed-package-sizes.txt
350 rm $1/installed-package-sizes.tmp
351
352 # We're now done with the cache, delete it
353 rm $pkgcache
354
355 if [ "$2" != "sdk" ] ; then
356 # Produce some cut-down graphs (for readability)
357 grep -v kernel_image $1/depends.dot | grep -v kernel-2 | grep -v kernel-3 > $1/depends-nokernel.dot
358 grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
359 grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
360 grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
361 fi
362
363 # add complementary package information
364 if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then
365 cp ${WORKDIR}/complementary_pkgs.txt $1
366 fi
367}
368
369buildhistory_get_image_installed() {
370 # Anything requiring the use of the packaging system should be done in here
371 # in case the packaging files are going to be removed for this image
372
373 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
374 return
375 fi
376
377 buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE}
378}
379
380buildhistory_get_sdk_installed() {
381 # Anything requiring the use of the packaging system should be done in here
382 # in case the packaging files are going to be removed for this SDK
383
384 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
385 return
386 fi
387
388 buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
389}
390
391buildhistory_list_files() {
392 # List the files in the specified directory, but exclude date/time etc.
393# This awk script is somewhat messy, but handles the case where the size is not printed for device files under pseudo
394 ( cd $1 && find . -ls | awk '{ if ( $7 ~ /[0-9]/ ) printf "%s %10-s %10-s %10s %s %s %s\n", $3, $5, $6, $7, $11, $12, $13 ; else printf "%s %10-s %10-s %10s %s %s %s\n", $3, $5, $6, 0, $10, $11, $12 }' | sort -k5 | sed 's/ *$//' > $2 )
395}
396
397
398buildhistory_get_imageinfo() {
399 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
400 return
401 fi
402
403 buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt
404
405 # Collect files requested in BUILDHISTORY_IMAGE_FILES
406 rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files
407 for f in ${BUILDHISTORY_IMAGE_FILES}; do
408 if [ -f ${IMAGE_ROOTFS}/$f ] ; then
409 mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f`
410 cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f
411 fi
412 done
413
414 # Record some machine-readable meta-information about the image
415 printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
416 cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
417${@buildhistory_get_imagevars(d)}
418END
419 imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
420 echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
421
422 # Add some configuration information
423 echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id
424
425 cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id <<END
426${@buildhistory_get_layers(d)}
427END
428}
429
430buildhistory_get_sdkinfo() {
431 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
432 return
433 fi
434
435 buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt
436
437 # Record some machine-readable meta-information about the SDK
438 printf "" > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
439 cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
440${@buildhistory_get_sdkvars(d)}
441END
442 sdksize=`du -ks ${SDK_OUTPUT} | awk '{ print $1 }'`
443 echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
444}
445
446# By prepending we get in before the removal of packaging files
447ROOTFS_POSTPROCESS_COMMAND =+ "buildhistory_get_image_installed ; "
448
449IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
450
451# We want these to be the last run so that we get called after complementary package installation
452POPULATE_SDK_POST_TARGET_COMMAND_append = "buildhistory_get_sdk_installed target ; "
453POPULATE_SDK_POST_HOST_COMMAND_append = "buildhistory_get_sdk_installed host ; "
454
455SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; "
456
457def buildhistory_get_layers(d):
458 layertext = "Configured metadata layers:\n%s\n" % '\n'.join(get_layers_branch_rev(d))
459 return layertext
460
461def buildhistory_get_metadata_revs(d):
462 # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
463 layers = (d.getVar("BBLAYERS", True) or "").split()
464 metadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
465 base_get_metadata_git_branch(i, None).strip(), \
466 base_get_metadata_git_revision(i, None)) \
467 for i in layers]
468 return '\n'.join(metadata_revs)
469
470
471def squashspaces(string):
472 import re
473 return re.sub("\s+", " ", string).strip()
474
475def outputvars(vars, listvars, d):
476 vars = vars.split()
477 listvars = listvars.split()
478 ret = ""
479 for var in vars:
480 value = d.getVar(var, True) or ""
481 if var in listvars:
482 # Squash out spaces
483 value = squashspaces(value)
484 ret += "%s = %s\n" % (var, value)
485 return ret.rstrip('\n')
486
487def buildhistory_get_imagevars(d):
488 imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
489 listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS"
490 return outputvars(imagevars, listvars, d)
491
492def buildhistory_get_sdkvars(d):
493 sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS"
494 listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS"
495 return outputvars(sdkvars, listvars, d)
496
497
498def buildhistory_get_cmdline(d):
499 if sys.argv[0].endswith('bin/bitbake'):
500 bincmd = 'bitbake'
501 else:
502 bincmd = sys.argv[0]
503 return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
504
505
506buildhistory_commit() {
507 if [ ! -d ${BUILDHISTORY_DIR} ] ; then
508 # The code above that creates this dir was never executed, so there can't be anything to commit
509 return
510 fi
511
512 # Create a machine-readable list of metadata revisions for each layer
513 cat > ${BUILDHISTORY_DIR}/metadata-revs <<END
514${@buildhistory_get_metadata_revs(d)}
515END
516
517 ( cd ${BUILDHISTORY_DIR}/
518 # Initialise the repo if necessary
519 if [ ! -d .git ] ; then
520 git init -q
521 else
522 git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
523 git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
524 git tag -f build-minus-1 > /dev/null 2>&1 || true
525 fi
526 # Check if there are new/changed files to commit (other than metadata-revs)
527 repostatus=`git status --porcelain | grep -v " metadata-revs$"`
528 HOSTNAME=`hostname 2>/dev/null || echo unknown`
529 CMDLINE="${@buildhistory_get_cmdline(d)}"
530 if [ "$repostatus" != "" ] ; then
531 git add -A .
532 # porcelain output looks like "?? packages/foo/bar"
533 # Ensure we commit metadata-revs with the first commit
534 for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
535 git commit $entry metadata-revs -m "$entry: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
536 done
537 if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
538 git push -q ${BUILDHISTORY_PUSH_REPO}
539 fi
540 else
541 git commit ${BUILDHISTORY_DIR}/ --allow-empty -m "No changes: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
542 fi) || true
543}
544
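Because the function keeps rolling build-minus-N tags, comparing recent builds is an ordinary git operation run from the history directory, e.g.:

    $ cd buildhistory
    $ git diff build-minus-1 HEAD -- packages/

which shows what changed in package output between the previous build and the current one.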
545python buildhistory_eventhandler() {
546 if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
547 if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
548 bb.note("Writing buildhistory")
549 bb.build.exec_func("buildhistory_commit", e.data)
550}
551
552addhandler buildhistory_eventhandler
553buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted"
554
555
556# FIXME this ought to be moved into the fetcher
557def _get_srcrev_values(d):
558 """
559 Return the version strings for the current recipe
560 """
561
562 scms = []
563 fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
564 urldata = fetcher.ud
565 for u in urldata:
566 if urldata[u].method.supports_srcrev():
567 scms.append(u)
568
569 autoinc_templ = 'AUTOINC+'
570 dict_srcrevs = {}
571 dict_tag_srcrevs = {}
572 for scm in scms:
573 ud = urldata[scm]
574 for name in ud.names:
575 rev = ud.method.sortable_revision(scm, ud, d, name)
576 # Clean this up when we next bump bitbake version
577 if type(rev) != str:
578 autoinc, rev = rev
579 elif rev.startswith(autoinc_templ):
580 rev = rev[len(autoinc_templ):]
581 dict_srcrevs[name] = rev
582 if 'tag' in ud.parm:
583                tag = ud.parm['tag']
584 key = name+'_'+tag
585 dict_tag_srcrevs[key] = rev
586 return (dict_srcrevs, dict_tag_srcrevs)
587
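# For illustration, write_srcrev below persists these values into
# ${BUILDHISTORY_DIR_PACKAGE}/latest_srcrev. A recipe with two named
# SRC_URI entries might produce something like (hypothetical revisions):
#   # SRCREV_machine = "abc123"
#   SRCREV_machine = "abc123"
#   SRCREV_meta = "def456"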
588do_fetch[postfuncs] += "write_srcrev"
589python write_srcrev() {
590 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
591 srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
592
593 srcrevs, tag_srcrevs = _get_srcrev_values(d)
594 if srcrevs:
595 if not os.path.exists(pkghistdir):
596 bb.utils.mkdirhier(pkghistdir)
597 old_tag_srcrevs = {}
598 if os.path.exists(srcrevfile):
599 with open(srcrevfile) as f:
600 for line in f:
601 if line.startswith('# tag_'):
602 key, value = line.split("=", 1)
603 key = key.replace('# tag_', '').strip()
604 value = value.replace('"', '').strip()
605 old_tag_srcrevs[key] = value
606 with open(srcrevfile, 'w') as f:
607 orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
608 if orig_srcrev != 'INVALID':
609 f.write('# SRCREV = "%s"\n' % orig_srcrev)
610 if len(srcrevs) > 1:
611 for name, srcrev in srcrevs.items():
612 orig_srcrev = d.getVar('SRCREV_%s' % name, False)
613 if orig_srcrev:
614 f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
615 f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
616 else:
617 f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next())
618 if len(tag_srcrevs) > 0:
619 for name, srcrev in tag_srcrevs.items():
620 f.write('# tag_%s = "%s"\n' % (name, srcrev))
621 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
622 pkg = d.getVar('PN', True)
623 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
624
625 else:
626 if os.path.exists(srcrevfile):
627 os.remove(srcrevfile)
628}
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
new file mode 100644
index 0000000000..72fff1167f
--- /dev/null
+++ b/meta/classes/buildstats.bbclass
@@ -0,0 +1,282 @@
1BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
2BNFILE = "${BUILDSTATS_BASE}/.buildname"
3DEVFILE = "${BUILDSTATS_BASE}/.device"
4
5################################################################################
6# Build statistics gathering.
7#
8# The CPU and Time gathering/tracking functions and bbevent inspiration
9# were written by Christopher Larson and can be seen here:
10# http://kergoth.pastey.net/142813
11#
12################################################################################
13
14def get_process_cputime(pid):
15 with open("/proc/%d/stat" % pid, "r") as f:
16 fields = f.readline().rstrip().split()
17    # 13: utime, 14: stime, 15: cutime, 16: cstime
18    return sum(int(field) for field in fields[13:17])
19
20def get_cputime():
21 with open("/proc/stat", "r") as f:
22 fields = f.readline().rstrip().split()[1:]
23 return sum(int(field) for field in fields)
24
25def set_bn(e):
26 bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
27 try:
28 os.remove(e.data.getVar('BNFILE', True))
29 except:
30 pass
31 with open(e.data.getVar('BNFILE', True), "w") as f:
32 f.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
33
34def get_bn(e):
35 with open(e.data.getVar('BNFILE', True)) as f:
36 bn = f.readline()
37 return bn
38
39def set_device(e):
40 tmpdir = e.data.getVar('TMPDIR', True)
41 try:
42 os.remove(e.data.getVar('DEVFILE', True))
43 except:
44 pass
45 ############################################################################
46 # We look for the volume TMPDIR lives on. To do all disks would make little
47 # sense and not give us any particularly useful data. In theory we could do
48 # something like stick DL_DIR on a different partition and this would
49 # throw stats gathering off. The same goes with SSTATE_DIR. However, let's
50 # get the basics in here and work on the cornercases later.
51 # A note. /proc/diskstats does not contain info on encryptfs, tmpfs, etc.
52 # If we end up hitting one of these fs, we'll just skip diskstats collection.
53 ############################################################################
54 device=os.stat(tmpdir)
55 majordev=os.major(device.st_dev)
56 minordev=os.minor(device.st_dev)
57 ############################################################################
58 # Bug 1700:
59 # Because tmpfs/encryptfs/ramfs etc inserts no entry in /proc/diskstats
60 # we set rdev to NoLogicalDevice and search for it later. If we find NLD
61 # we do not collect diskstats as the method to collect meaningful statistics
62 # for these fs types requires a bit more research.
63 ############################################################################
64 rdev="NoLogicalDevice"
65 try:
66 with open("/proc/diskstats", "r") as f:
67 for line in f:
68 if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
69 rdev=line.split()[2]
70 except:
71 pass
72 file = open(e.data.getVar('DEVFILE', True), "w")
73 file.write(rdev)
74 file.close()
75
76def get_device(e):
77 file = open(e.data.getVar('DEVFILE', True))
78 device = file.readline()
79 file.close()
80 return device
81
82def get_diskstats(dev):
83 import itertools
84 ############################################################################
85 # For info on what these are, see kernel doc file iostats.txt
86 ############################################################################
87    DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'WritesMerged', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
88    diskstats_val = None
89    try:
90        with open("/proc/diskstats", "r") as f:
91            for x in f:
92                if dev in x:
93                    diskstats_val = x.rstrip().split()[3:]
94    except IOError:
95        return
96    return dict(itertools.izip(DSTAT_KEYS, diskstats_val)) if diskstats_val else None
97
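# For reference, a /proc/diskstats line has the form (values illustrative):
#   8 0 sda 5437 120 183080 3920 9214 244 406702 24880 0 10380 28800
# i.e. major, minor and device name followed by the I/O counters described
# in the kernel's iostats.txt, which get_diskstats() pairs with DSTAT_KEYS.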
98def set_diskdata(var, dev, data):
99 data.setVar(var, get_diskstats(dev))
100
101def get_diskdata(var, dev, data):
102 olddiskdata = data.getVar(var, False)
103 diskdata = {}
104 if olddiskdata is None:
105 return
106 newdiskdata = get_diskstats(dev)
107 for key in olddiskdata.iterkeys():
108 diskdata["Start"+key] = str(int(olddiskdata[key]))
109 diskdata["End"+key] = str(int(newdiskdata[key]))
110 return diskdata
111
112def set_timedata(var, data):
113 import time
114    now = time.time()
115    cputime = get_cputime()
116    proctime = get_process_cputime(os.getpid())
117    data.setVar(var, (now, cputime, proctime))
118
119def get_timedata(var, data):
120 import time
121 timedata = data.getVar(var, False)
122 if timedata is None:
123 return
124 oldtime, oldcpu, oldproc = timedata
125 procdiff = get_process_cputime(os.getpid()) - oldproc
126 cpudiff = get_cputime() - oldcpu
127 timediff = time.time() - oldtime
128 if cpudiff > 0:
129 cpuperc = float(procdiff) * 100 / cpudiff
130 else:
131 cpuperc = None
132 return timediff, cpuperc
133
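# Worked example for get_timedata() (illustrative numbers): if the build's
# processes accumulated procdiff = 250 jiffies between samples while the
# whole system accumulated cpudiff = 1000 jiffies across all CPUs, the
# reported usage is 250 * 100 / 1000 = 25.0%.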
134def write_task_data(status, logfile, dev, e):
135    import time
136    bn = get_bn(e)
137    bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
138    file = open(logfile, "a")
139 timedata = get_timedata("__timedata_task", e.data)
140 if timedata:
141 elapsedtime, cpu = timedata
142 file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
143 (e.task, elapsedtime), e.data))
144 if cpu:
145 file.write("CPU usage: %0.1f%% \n" % cpu)
146 ############################################################################
147 # Here we gather up disk data. In an effort to avoid lying with stats
148 # I do a bare minimum of analysis of collected data.
149 # The simple fact is, doing disk io collection on a per process basis
150    # without affecting build time would be difficult.
151 # For the best information, running things with BB_TOTAL_THREADS = "1"
152 # would return accurate per task results.
153 ############################################################################
154 if dev != "NoLogicalDevice":
155 diskdata = get_diskdata("__diskdata_task", dev, e.data)
156 if diskdata:
157 for key in sorted(diskdata.iterkeys()):
158 file.write(key + ": " + diskdata[key] + "\n")
159    if status == "passed":
160 file.write("Status: PASSED \n")
161 else:
162 file.write("Status: FAILED \n")
163 file.write("Ended: %0.2f \n" % time.time())
164 file.close()
165
166python run_buildstats () {
167 import bb.build
168 import bb.event
169 import bb.data
170 import time, subprocess, platform
171
172 if isinstance(e, bb.event.BuildStarted):
173 ########################################################################
174        # on the first pass, make the buildstats hierarchy and then
175 # set the buildname
176 ########################################################################
177 try:
178 bb.utils.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
179 except:
180 pass
181 set_bn(e)
182 bn = get_bn(e)
183 set_device(e)
184 device = get_device(e)
185
186 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
187 try:
188 bb.utils.mkdirhier(bsdir)
189 except:
190 pass
191 if device != "NoLogicalDevice":
192 set_diskdata("__diskdata_build", device, e.data)
193 set_timedata("__timedata_build", e.data)
194 build_time = os.path.join(bsdir, "build_stats")
195 # write start of build into build_time
196 file = open(build_time,"a")
197 host_info = platform.uname()
198 file.write("Host Info: ")
199 for x in host_info:
200 if x:
201 file.write(x + " ")
202 file.write("\n")
203 file.write("Build Started: %0.2f \n" % time.time())
204 file.close()
205
206 elif isinstance(e, bb.event.BuildCompleted):
207 bn = get_bn(e)
208 device = get_device(e)
209 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
210 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
211 build_time = os.path.join(bsdir, "build_stats")
212 file = open(build_time, "a")
213 ########################################################################
214 # Write build statistics for the build
215 ########################################################################
216 timedata = get_timedata("__timedata_build", e.data)
217 if timedata:
218 time, cpu = timedata
219 # write end of build and cpu used into build_time
220 file.write("Elapsed time: %0.2f seconds \n" % (time))
221 if cpu:
222 file.write("CPU usage: %0.1f%% \n" % cpu)
223 if device != "NoLogicalDevice":
224 diskio = get_diskdata("__diskdata_build", device, e.data)
225 if diskio:
226 for key in sorted(diskio.iterkeys()):
227 file.write(key + ": " + diskio[key] + "\n")
228 file.close()
229
230 if isinstance(e, bb.build.TaskStarted):
231 bn = get_bn(e)
232 device = get_device(e)
233 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
234 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
235 if device != "NoLogicalDevice":
236 set_diskdata("__diskdata_task", device, e.data)
237 set_timedata("__timedata_task", e.data)
238 try:
239 bb.utils.mkdirhier(taskdir)
240 except:
241 pass
242 # write into the task event file the name and start time
243 file = open(os.path.join(taskdir, e.task), "a")
244 file.write("Event: %s \n" % bb.event.getName(e))
245 file.write("Started: %0.2f \n" % time.time())
246 file.close()
247
248 elif isinstance(e, bb.build.TaskSucceeded):
249 bn = get_bn(e)
250 device = get_device(e)
251 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
252 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
253 write_task_data("passed", os.path.join(taskdir, e.task), device, e)
254 if e.task == "do_rootfs":
255 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
256 bs=os.path.join(bsdir, "build_stats")
257 file = open(bs,"a")
258 rootfs = e.data.getVar('IMAGE_ROOTFS', True)
259 rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
260 file.write("Uncompressed Rootfs size: %s" % rootfs_size)
261 file.close()
262
263 elif isinstance(e, bb.build.TaskFailed):
264 bn = get_bn(e)
265 device = get_device(e)
266 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
267 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
268 write_task_data("failed", os.path.join(taskdir, e.task), device, e)
269 ########################################################################
270        # Let's make things easier and tell people where the build failed in
271        # build_stats. We do this here because BuildCompleted triggers no
272        # matter what the status of the build actually is
273 ########################################################################
274 build_status = os.path.join(bsdir, "build_stats")
275 file = open(build_status,"a")
276 file.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task))
277 file.close()
278}
279
280addhandler run_buildstats
281run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
282
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
new file mode 100644
index 0000000000..2cdce46932
--- /dev/null
+++ b/meta/classes/ccache.bbclass
@@ -0,0 +1,8 @@
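# Expands to "ccache " when a ccache binary is found on PATH, and to the
# empty string otherwise (bb.utils.which returns "" and 'and' short-circuits).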
1CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
2export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
3CCACHE_DISABLE[unexport] = "1"
4
5do_configure[dirs] =+ "${CCACHE_DIR}"
6do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
7
8do_clean[cleandirs] += "${CCACHE_DIR}"
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
new file mode 100644
index 0000000000..61a24b3f5a
--- /dev/null
+++ b/meta/classes/chrpath.bbclass
@@ -0,0 +1,135 @@
1CHRPATH_BIN ?= "chrpath"
2PREPROCESS_RELOCATE_DIRS ?= ""
3
4def process_file_linux(cmd, fpath, basedir, tmpdir, d):
5 import subprocess as sub
6
7    p = sub.Popen([cmd, '-l', fpath], stdout=sub.PIPE, stderr=sub.PIPE)
8    out, err = p.communicate()
9    # If the command ran successfully, process its stdout for results
10    if p.returncode != 0:
11        return
12
13    # Throw away everything other than the rpath list
14    curr_rpath = out.partition("RPATH=")[2]
15 #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
16 rpaths = curr_rpath.split(":")
17 new_rpaths = []
18 for rpath in rpaths:
19 # If rpath is already dynamic copy it to new_rpath and continue
20 if rpath.find("$ORIGIN") != -1:
21 new_rpaths.append(rpath.strip())
22 continue
23 rpath = os.path.normpath(rpath)
24 # If the rpath shares a root with base_prefix determine a new dynamic rpath from the
25 # base_prefix shared root
26 if rpath.find(basedir) != -1:
27 depth = fpath.partition(basedir)[2].count('/')
28 libpath = rpath.partition(basedir)[2].strip()
29 # otherwise (i.e. cross packages) determine a shared root based on the TMPDIR
30 # NOTE: This will not work reliably for cross packages, particularly in the case
31        # where your TMPDIR is a short path (e.g. /usr/poky) as chrpath cannot insert an
32 # rpath longer than that which is already set.
33 elif rpath.find(tmpdir) != -1:
34 depth = fpath.rpartition(tmpdir)[2].count('/')
35 libpath = rpath.partition(tmpdir)[2].strip()
36        else:
37            new_rpaths.append(rpath.strip())
38            continue
39 base = "$ORIGIN"
40 while depth > 1:
41 base += "/.."
42 depth-=1
43 new_rpaths.append("%s%s" % (base, libpath))
44
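    # Worked example (hypothetical paths): for fpath=/usr/bin/foo with
    # basedir=/usr, "/bin/foo" contains two slashes so depth=2; an rpath of
    # /usr/lib gives libpath="/lib", the loop above yields base="$ORIGIN/..",
    # and the binary ends up with the relocatable rpath "$ORIGIN/../lib".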
45 # if we have modified some rpaths call chrpath to update the binary
46 if len(new_rpaths):
47 args = ":".join(new_rpaths)
48 #bb.note("Setting rpath for %s to %s" %(fpath, args))
49 p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
50 out, err = p.communicate()
51 if p.returncode != 0:
52 bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
53 raise bb.build.FuncFailed
54
55def process_file_darwin(cmd, fpath, basedir, tmpdir, d):
56 import subprocess as sub
57
58    p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath], stdout=sub.PIPE, stderr=sub.PIPE)
59    out, err = p.communicate()
60    # If the command ran successfully, process its stdout for results
61    if p.returncode != 0:
62        return
63    for l in out.split("\n"):
64 if "(compatibility" not in l:
65 continue
66 rpath = l.partition("(compatibility")[0].strip()
67 if rpath.find(basedir) != -1:
68 depth = fpath.partition(basedir)[2].count('/')
69 libpath = rpath.partition(basedir)[2].strip()
70 else:
71 continue
72
73 base = "@loader_path"
74 while depth > 1:
75 base += "/.."
76 depth-=1
77 base = base + libpath
78 p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, base, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
79 err, out = p.communicate()
80
81def process_dir (directory, d):
82 import stat
83
84 cmd = d.expand('${CHRPATH_BIN}')
85    tmpdir = os.path.normpath(d.getVar('TMPDIR', True))
86 basedir = os.path.normpath(d.expand('${base_prefix}'))
87 hostos = d.getVar("HOST_OS", True)
88
89 #bb.debug("Checking %s for binaries to process" % directory)
90 if not os.path.exists(directory):
91 return
92
93 if "linux" in hostos:
94 process_file = process_file_linux
95 elif "darwin" in hostos:
96 process_file = process_file_darwin
97 else:
98 # Relocations not supported
99 return
100
101 dirs = os.listdir(directory)
102 for file in dirs:
103 fpath = directory + "/" + file
104 fpath = os.path.normpath(fpath)
105 if os.path.islink(fpath):
106 # Skip symlinks
107 continue
108
109 if os.path.isdir(fpath):
110 process_dir(fpath, d)
111 else:
112 #bb.note("Testing %s for relocatability" % fpath)
113
114            # We need read and write permissions for chrpath; if we don't
115            # have them, set them temporarily. Take a copy of the file's
116            # permissions so that we can restore them afterwards.
117 perms = os.stat(fpath)[stat.ST_MODE]
118 if os.access(fpath, os.W_OK|os.R_OK):
119 perms = None
120 else:
121 # Temporarily make the file writeable so we can chrpath it
122 os.chmod(fpath, perms|stat.S_IRWXU)
123 process_file(cmd, fpath, basedir, tmpdir, d)
124
125 if perms:
126 os.chmod(fpath, perms)
127
128def rpath_replace (path, d):
129 bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
130
131 for bindir in bindirs:
132 #bb.note ("Processing directory " + bindir)
133 directory = path + "/" + bindir
134 process_dir (directory, d)
135
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
new file mode 100644
index 0000000000..66a49bc175
--- /dev/null
+++ b/meta/classes/clutter.bbclass
@@ -0,0 +1,23 @@
1
2def get_minor_dir(v):
3 import re
4 m = re.match("^([0-9]+)\.([0-9]+)", v)
5 return "%s.%s" % (m.group(1), m.group(2))
6
7def get_real_name(n):
8 import re
9 m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
10 return "%s" % (m.group(1))
11
12VERMINOR = "${@get_minor_dir("${PV}")}"
13REALNAME = "${@get_real_name("${BPN}")}"
14FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${REALNAME}-${PV}", "${FILE_DIRNAME}/${REALNAME}-${VERMINOR}", "${FILE_DIRNAME}/${REALNAME}", "${FILE_DIRNAME}/files"], d)}"
15
16CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
17
18CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}"
19
20SRC_URI = "${CLUTTER_SRC_FTP}"
21S = "${WORKDIR}/${REALNAME}-${PV}"
22
23inherit autotools pkgconfig gtk-doc gettext
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
new file mode 100644
index 0000000000..30c1792ffa
--- /dev/null
+++ b/meta/classes/cmake.bbclass
@@ -0,0 +1,118 @@
1DEPENDS_prepend = "cmake-native "
2
3# We need to unset CCACHE otherwise cmake gets too confused
4CCACHE = ""
5
6# We want the staging and installing functions from autotools
7inherit autotools
8
9# Use in-tree builds by default but allow this to be changed
10# since some packages do not support them (e.g. llvm 2.5).
11OECMAKE_SOURCEPATH ?= "."
12
13# If declaring this, make sure you also set EXTRA_OEMAKE to
14# "-C ${OECMAKE_BUILDPATH}" so that the right makefiles are run.
15OECMAKE_BUILDPATH ?= ""
16B="${S}"
17
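# For illustration (hypothetical recipe), an out-of-tree build could be set
# up with:
#   OECMAKE_SOURCEPATH = "${S}"
#   OECMAKE_BUILDPATH = "${WORKDIR}/build"
#   EXTRA_OEMAKE = "-C ${OECMAKE_BUILDPATH}"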
18# C/C++ Compiler (without cpu arch/tune arguments)
19OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
20OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
21
22# Compiler flags
23OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
24OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} -fpermissive"
25OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG"
26OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
27OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
28OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
29
30OECMAKE_RPATH ?= ""
31OECMAKE_PERLNATIVE_DIR ??= ""
32OECMAKE_EXTRA_ROOT_PATH ?= ""
33
34cmake_do_generate_toolchain_file() {
35 cat > ${WORKDIR}/toolchain.cmake <<EOF
36# CMake system name must be something like "Linux".
37# This is important for cross-compiling.
38set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
39set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
40set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
41set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
42set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
43set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
44set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
45set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
46set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
47set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
48
49# only search in the paths provided so cmake doesn't pick
50# up libraries and tools from the native build machine
51set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
52set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
53set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
54set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
55set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
56
57# Use qt.conf settings
58set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
59
60# We need to set the rpath to the correct directory as cmake does not provide any
61# directory as rpath by default
62set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
63
64# Use native cmake modules
65set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
66
67# add for non /usr/lib libdir, e.g. /usr/lib64
68set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
69
70EOF
71}
72
73addtask generate_toolchain_file after do_patch before do_configure
74
75cmake_do_configure() {
76	if [ -n "${OECMAKE_BUILDPATH}" ]
77 then
78 mkdir -p ${OECMAKE_BUILDPATH}
79 cd ${OECMAKE_BUILDPATH}
80 fi
81
82	# Just like autotools, cmake can use a site file to cache results that would otherwise need generated binaries to run
83 if [ -e ${WORKDIR}/site-file.cmake ] ; then
84 OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
85 else
86 OECMAKE_SITEFILE=""
87 fi
88
89 cmake \
90 ${OECMAKE_SITEFILE} \
91 ${OECMAKE_SOURCEPATH} \
92 -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
93 -DCMAKE_INSTALL_SO_NO_EXE=0 \
94 -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
95 -DCMAKE_VERBOSE_MAKEFILE=1 \
96 ${EXTRA_OECMAKE} \
97 -Wno-dev
98}
99
100cmake_do_compile() {
101	if [ -n "${OECMAKE_BUILDPATH}" ]
102 then
103 cd ${OECMAKE_BUILDPATH}
104 fi
105
106 base_do_compile
107}
108
109cmake_do_install() {
110	if [ -n "${OECMAKE_BUILDPATH}" ]
111 then
112 cd ${OECMAKE_BUILDPATH}
113 fi
114
115 autotools_do_install
116}
117
118EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
new file mode 100644
index 0000000000..bb9563948c
--- /dev/null
+++ b/meta/classes/cml1.bbclass
@@ -0,0 +1,40 @@
1cml1_do_configure() {
2 set -e
3 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
4 oe_runmake oldconfig
5}
6
7EXPORT_FUNCTIONS do_configure
8addtask configure after do_unpack do_patch before do_compile
9
10inherit terminal
11
12OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS HOST_LOADLIBES TERMINFO"
13HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
14HOSTLDFLAGS = "${BUILD_LDFLAGS}"
15HOST_LOADLIBES = "-lncurses"
16TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
17
18python do_menuconfig() {
19 try:
20 mtime = os.path.getmtime(".config")
21 except OSError:
22 mtime = 0
23
24 oe_terminal("${SHELL} -c \"make menuconfig; if [ $? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"", '${PN} Configuration', d)
25
26 # FIXME this check can be removed when the minimum bitbake version has been bumped
27 if hasattr(bb.build, 'write_taint'):
28 try:
29 newmtime = os.path.getmtime(".config")
30 except OSError:
31 newmtime = 0
32
33 if newmtime > mtime:
34 bb.note("Configuration changed, recompile will be forced")
35 bb.build.write_taint('do_compile', d)
36}
37do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
38do_menuconfig[nostamp] = "1"
39addtask menuconfig after do_configure
40
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
new file mode 100644
index 0000000000..32aa7577f0
--- /dev/null
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -0,0 +1,67 @@
1# Deploy sources for recipes for compliance with copyleft-style licenses
2# Defaults to using symlinks, as it's a quick operation, and one can easily
3# follow the links when making use of the files (e.g. tar with the -h arg).
4#
5# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
6#
7# vi:sts=4:sw=4:et
8
9# Need the copyleft_should_include
10inherit archiver
11
12COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
13
14python do_prepare_copyleft_sources () {
15 """Populate a tree of the recipe sources and emit patch series files"""
16 import os.path
17 import shutil
18
19 p = d.getVar('P', True)
20 included, reason = copyleft_should_include(d)
21 if not included:
22 bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
23 return
24 else:
25 bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
26
27 sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
28 dl_dir = d.getVar('DL_DIR', True)
29 src_uri = d.getVar('SRC_URI', True).split()
30 fetch = bb.fetch2.Fetch(src_uri, d)
31 ud = fetch.ud
32
33 pf = d.getVar('PF', True)
34 dest = os.path.join(sources_dir, pf)
35 shutil.rmtree(dest, ignore_errors=True)
36 bb.utils.mkdirhier(dest)
37
38 for u in ud.values():
39 local = os.path.normpath(fetch.localpath(u.url))
40 if local.endswith('.bb'):
41 continue
42 elif local.endswith('/'):
43 local = local[:-1]
44
45 if u.mirrortarball:
46 tarball_path = os.path.join(dl_dir, u.mirrortarball)
47 if os.path.exists(tarball_path):
48 local = tarball_path
49
50 oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
51
52 patches = src_patches(d)
53 for patch in patches:
54 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
55 patchdir = parm.get('patchdir')
56 if patchdir:
57 series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
58 else:
59 series = os.path.join(dest, 'series')
60
61 with open(series, 'a') as s:
62 s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
63}
64
65addtask prepare_copyleft_sources after do_fetch before do_build
66do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
67do_build[recrdeptask] += 'do_prepare_copyleft_sources'
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
new file mode 100644
index 0000000000..e7c34e2791
--- /dev/null
+++ b/meta/classes/core-image.bbclass
@@ -0,0 +1,80 @@
1# Common code for generating core reference images
2#
3# Copyright (C) 2007-2011 Linux Foundation
4
5LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=3f40d7994397109285ec7b81fdeb3b58 \
6 file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
7
8# IMAGE_FEATURES control content of the core reference images
9#
10# By default we install packagegroup-core-boot and packagegroup-base packages - this gives us
11# a working (console-only) rootfs.
12#
13# Available IMAGE_FEATURES:
14#
15# - x11 - X server
16# - x11-base - X server with minimal environment
17# - x11-sato - OpenedHand Sato environment
18# - tools-debug - debugging tools
19# - eclipse-debug - Eclipse remote debugging support
20# - tools-profile - profiling tools
21# - tools-testapps - tools usable to make some device tests
22# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
23# - nfs-server - NFS server
24# - ssh-server-dropbear - SSH server (dropbear)
25# - ssh-server-openssh - SSH server (openssh)
26# - qt4-pkgs - Qt4/X11 and demo applications
27# - hwcodecs - Install hardware acceleration codecs
28# - package-management - installs package management tools and preserves the package manager database
29# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
30# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
31# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
32# - doc-pkgs - documentation packages for all installed packages in the rootfs
33# - read-only-rootfs - tweaks an image to support read-only rootfs
34#
35PACKAGE_GROUP_x11 = "packagegroup-core-x11"
36PACKAGE_GROUP_x11-base = "packagegroup-core-x11-base"
37PACKAGE_GROUP_x11-sato = "packagegroup-core-x11-sato"
38PACKAGE_GROUP_tools-debug = "packagegroup-core-tools-debug"
39PACKAGE_GROUP_eclipse-debug = "packagegroup-core-eclipse-debug"
40PACKAGE_GROUP_tools-profile = "packagegroup-core-tools-profile"
41PACKAGE_GROUP_tools-testapps = "packagegroup-core-tools-testapps"
42PACKAGE_GROUP_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
43PACKAGE_GROUP_nfs-server = "packagegroup-core-nfs-server"
44PACKAGE_GROUP_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
45PACKAGE_GROUP_ssh-server-openssh = "packagegroup-core-ssh-openssh"
46PACKAGE_GROUP_package-management = "${ROOTFS_PKGMANAGE}"
47PACKAGE_GROUP_qt4-pkgs = "packagegroup-core-qt-demoapps"
48PACKAGE_GROUP_hwcodecs = "${MACHINE_HWCODECS}"
49
50
51# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
52# Including image feature foo would replace the image features bar1 and bar2
53IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
54
55# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
56# An error exception is raised if both image features foo and bar1 (or bar2) are included
57
58MACHINE_HWCODECS ??= ""
59
60CORE_IMAGE_BASE_INSTALL = '\
61 packagegroup-core-boot \
62 packagegroup-base-extended \
63 \
64 ${CORE_IMAGE_EXTRA_INSTALL} \
65 '
66
67CORE_IMAGE_EXTRA_INSTALL ?= ""
68
69IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
70
71inherit image
72
73# Create /etc/timestamp during image construction to give a reasonably sane default time setting
74ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
75
76# Zap the root password if debug-tweaks feature is not enabled
77ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_root_password ; ",d)}'
78
79# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
80ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
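
# For illustration (hypothetical image recipe or local.conf), features and
# extra packages are typically requested like:
#
#   IMAGE_FEATURES += "ssh-server-dropbear package-management"
#   CORE_IMAGE_EXTRA_INSTALL += "strace"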
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
new file mode 100644
index 0000000000..7e1e8d0d6b
--- /dev/null
+++ b/meta/classes/cpan-base.bbclass
@@ -0,0 +1,51 @@
1#
2# cpan-base provides various Perl-related information needed for building
3# CPAN modules
4#
5FILES_${PN} += "${libdir}/perl ${datadir}/perl"
6
7DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
8RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
9
10PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
11
12# Determine the staged version of perl from the perl configuration file
13def get_perl_version(d):
14 import re
15 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
16 try:
17 f = open(cfg, 'r')
18 except IOError:
19 return None
20    l = f.readlines()
21    f.close()
22 r = re.compile("^version='(\d*\.\d*\.\d*)'")
23 for s in l:
24 m = r.match(s)
25 if m:
26 return m.group(1)
27 return None
28
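# The regex above matches a config.sh line of the form (illustrative):
#   version='5.14.3'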
29# Determine where the library directories are
30def perl_get_libdirs(d):
31 libdir = d.getVar('libdir', True)
32 if is_target(d) == "no":
33 libdir += '/perl-native'
34 libdir += '/perl'
35 return libdir
36
37def is_target(d):
38 if not bb.data.inherits_class('native', d):
39 return "yes"
40 return "no"
41
42PERLLIBDIRS := "${@perl_get_libdirs(d)}"
43PERLVERSION := "${@get_perl_version(d)}"
44
45FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
46 ${PERLLIBDIRS}/auto/*/*/.debug \
47 ${PERLLIBDIRS}/auto/*/*/*/.debug \
48 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/.debug \
49 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/.debug \
50 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/.debug \
51 "
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
new file mode 100644
index 0000000000..7088039fa0
--- /dev/null
+++ b/meta/classes/cpan.bbclass
@@ -0,0 +1,55 @@
1#
2# This is for perl modules that use the old Makefile.PL build system
3#
4inherit cpan-base perlnative
5
6EXTRA_CPANFLAGS ?= ""
7EXTRA_PERLFLAGS ?= ""
8
9# Env var which tells perl if it should use host (no) or target (yes) settings
10export PERLCONFIGTARGET = "${@is_target(d)}"
11
12# Env var which tells perl where the perl include files are
13export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
14export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
15export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
16export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
17
18cpan_do_configure () {
19 export PERL5LIB="${PERL_ARCHLIB}"
20 yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
21
22 # Makefile.PLs can exit with success without generating a
23 # Makefile, e.g. in cases of missing configure time
24 # dependencies. This is considered a best practice by
25 # cpantesters.org. See:
26 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
27 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
28 [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
29
30 if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
31 . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
32 # Use find since there can be a Makefile generated for each Makefile.PL
33 for f in `find -name Makefile.PL`; do
34 f2=`echo $f | sed -e 's/.PL//'`
35 test -f $f2 || continue
36 sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
37 -e 's/perl.real/perl/' \
38 -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
39 $f2
40 done
41 fi
42}
43
44cpan_do_compile () {
45 oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
46}
47
48cpan_do_install () {
49 oe_runmake DESTDIR="${D}" install_vendor
50 for PERLSCRIPT in `grep -rIEl '#!${bindir}/perl-native.*/perl' ${D}`; do
51 sed -i -e 's|^#!${bindir}/perl-native.*/perl|#!/usr/bin/env nativeperl|' $PERLSCRIPT
52 done
53}
54
55EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
new file mode 100644
index 0000000000..eaba40a06f
--- /dev/null
+++ b/meta/classes/cpan_build.bbclass
@@ -0,0 +1,51 @@
1#
2# This is for perl modules that use the new Build.PL build system
3#
4inherit cpan-base perlnative
5
6# Env var which tells perl if it should use host (no) or target (yes) settings
7export PERLCONFIGTARGET = "${@is_target(d)}"
8export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
9export LD = "${CCLD}"
10
11#
12# We also need to have built libmodule-build-perl-native for
13# everything except libmodule-build-perl-native itself (which uses
14# this class, but uses itself as the provider of
15# libmodule-build-perl)
16#
17def cpan_build_dep_prepend(d):
18 if d.getVar('CPAN_BUILD_DEPS', True):
19 return ''
20 pn = d.getVar('PN', True)
21 if pn in ['libmodule-build-perl', 'libmodule-build-perl-native']:
22 return ''
23 return 'libmodule-build-perl-native '
24
25DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}"
26
27cpan_build_do_configure () {
28 if [ "${@is_target(d)}" = "yes" ]; then
29 # build for target
30 . ${STAGING_LIBDIR}/perl/config.sh
31 fi
32
33 perl Build.PL --installdirs vendor \
34 --destdir ${D} \
35 --install_path lib="${datadir}/perl" \
36 --install_path arch="${libdir}/perl" \
37 --install_path script=${bindir} \
38 --install_path bin=${bindir} \
39 --install_path bindoc=${mandir}/man1 \
40 --install_path libdoc=${mandir}/man3
41}
42
43cpan_build_do_compile () {
44 perl Build
45}
46
47cpan_build_do_install () {
48 perl Build install
49}
50
51EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
new file mode 100644
index 0000000000..4387d05f78
--- /dev/null
+++ b/meta/classes/cross-canadian.bbclass
@@ -0,0 +1,97 @@
1#
2# NOTE - When using this class the user is responsible for ensuring that
3# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
4# is changed, another nativesdk xxx-canadian-cross can be installed
5#
6
7
8# SDK packages are built either explicitly by the user,
9# or indirectly via dependency. No need to be in 'world'.
10EXCLUDE_FROM_WORLD = "1"
11CLASSOVERRIDE = "class-cross-canadian"
12STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
13
14#
15# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
16#
17PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
18python () {
19 archs = d.getVar('PACKAGE_ARCHS', True).split()
20 sdkarchs = []
21 for arch in archs:
22 sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
23 d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
24}
25MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
26
27INHIBIT_DEFAULT_DEPS = "1"
28
29STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
30
31TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
32
33PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
34PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
35
36HOST_ARCH = "${SDK_ARCH}"
37HOST_VENDOR = "${SDK_VENDOR}"
38HOST_OS = "${SDK_OS}"
39HOST_PREFIX = "${SDK_PREFIX}"
40HOST_CC_ARCH = "${SDK_CC_ARCH}"
41HOST_LD_ARCH = "${SDK_LD_ARCH}"
42HOST_AS_ARCH = "${SDK_AS_ARCH}"
43
44#assign DPKG_ARCH
45DPKG_ARCH = "${SDK_ARCH}"
46
47CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
48CFLAGS = "${BUILDSDK_CFLAGS}"
49CXXFLAGS = "${BUILDSDK_CFLAGS}"
50LDFLAGS = "${BUILDSDK_LDFLAGS} \
51 -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
52 -Wl,-rpath,${libdir}/.. "
53
54DEPENDS_GETTEXT = "gettext-native nativesdk-gettext"
55
56#
57# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
58# binaries
59#
60DEPENDS_append = " chrpath-replacement-native"
61EXTRANATIVEPATH += "chrpath-native"
62
63# Path mangling needed by the cross packaging
64# Note that we use := here to ensure that libdir and includedir are
65# target paths.
66target_libdir := "${libdir}"
67target_includedir := "${includedir}"
68target_base_libdir := "${base_libdir}"
69target_prefix := "${prefix}"
70target_exec_prefix := "${exec_prefix}"
71
72# Change to place files in SDKPATH
73base_prefix = "${SDKPATHNATIVE}"
74prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
75exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
76bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
77sbindir = "${bindir}"
78base_bindir = "${bindir}"
79base_sbindir = "${bindir}"
80libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
81libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
82
83FILES_${PN} = "${prefix}"
84FILES_${PN}-dbg += "${prefix}/.debug \
85 ${prefix}/bin/.debug \
86 "
87
88export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
89export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
90
91do_populate_sysroot[stamp-extra-info] = ""
92
93USE_NLS = "${SDKUSE_NLS}"
94
95# We have to use TARGET_ARCH, but we only care about the absolute value
96# and not any particular tune that is enabled.
97TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
new file mode 100644
index 0000000000..bd1448965c
--- /dev/null
+++ b/meta/classes/cross.bbclass
@@ -0,0 +1,80 @@
1inherit relocatable
2
3# Cross packages are built indirectly via dependency,
4# no need for them to be a direct target of 'world'
5EXCLUDE_FROM_WORLD = "1"
6
7CLASSOVERRIDE = "class-cross"
8PACKAGES = ""
9PACKAGES_DYNAMIC = ""
10PACKAGES_DYNAMIC_class-native = ""
11
12HOST_ARCH = "${BUILD_ARCH}"
13HOST_VENDOR = "${BUILD_VENDOR}"
14HOST_OS = "${BUILD_OS}"
15HOST_PREFIX = "${BUILD_PREFIX}"
16HOST_CC_ARCH = "${BUILD_CC_ARCH}"
17HOST_LD_ARCH = "${BUILD_LD_ARCH}"
18HOST_AS_ARCH = "${BUILD_AS_ARCH}"
19
20STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
21
22export PKG_CONFIG_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
23export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
24
25CPPFLAGS = "${BUILD_CPPFLAGS}"
26CFLAGS = "${BUILD_CFLAGS}"
27CXXFLAGS = "${BUILD_CFLAGS}"
28LDFLAGS = "${BUILD_LDFLAGS}"
29LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
30
31TOOLCHAIN_OPTIONS = ""
32
33DEPENDS_GETTEXT = "gettext-native"
34
35# Path mangling needed by the cross packaging
36# Note that we use := here to ensure that libdir and includedir are
37# target paths.
38target_base_prefix := "${base_prefix}"
39target_prefix := "${prefix}"
40target_exec_prefix := "${exec_prefix}"
41target_base_libdir = "${target_base_prefix}/${baselib}"
42target_libdir = "${target_exec_prefix}/${baselib}"
43target_includedir := "${includedir}"
44
45# Overrides for paths
46CROSS_TARGET_SYS_DIR = "${MULTIMACH_TARGET_SYS}"
47prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
48base_prefix = "${STAGING_DIR_NATIVE}"
49exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
50bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
51sbindir = "${bindir}"
52base_bindir = "${bindir}"
53base_sbindir = "${bindir}"
54libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
55libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
56
57do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
58do_populate_sysroot[stamp-extra-info] = ""
59do_packagedata[stamp-extra-info] = ""
60
61python cross_virtclass_handler () {
62 classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
63 if "cross" not in classextend:
64 return
65
66 pn = e.data.getVar("PN", True)
67 if not pn.endswith("-cross"):
68 return
69
70 bb.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-cross", e.data)
71}
72
73addhandler cross_virtclass_handler
74cross_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
75
76do_install () {
77 oe_runmake 'DESTDIR=${D}' install
78}
79
80USE_NLS = "no"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
new file mode 100644
index 0000000000..261a37465e
--- /dev/null
+++ b/meta/classes/crosssdk.bbclass
@@ -0,0 +1,35 @@
1inherit cross
2
3CLASSOVERRIDE = "class-crosssdk"
4PACKAGE_ARCH = "${SDK_ARCH}"
5python () {
6 # set TUNE_PKGARCH to SDK_ARCH
7 d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
8}
9
10STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}"
11STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
12
13TARGET_ARCH = "${SDK_ARCH}"
14TARGET_VENDOR = "${SDK_VENDOR}"
15TARGET_OS = "${SDK_OS}"
16TARGET_PREFIX = "${SDK_PREFIX}"
17TARGET_CC_ARCH = "${SDK_CC_ARCH}"
18TARGET_LD_ARCH = "${SDK_LD_ARCH}"
19TARGET_AS_ARCH = "${SDK_AS_ARCH}"
20TARGET_FPU = ""
21
22target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
23target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
24target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
25target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
26target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
27baselib = "lib"
28
29do_populate_sysroot[stamp-extra-info] = ""
30do_packagedata[stamp-extra-info] = ""
31
32# Need to force this to ensure consistency across architectures
33EXTRA_OECONF_FPU = ""
34
35USE_NLS = "no"
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
new file mode 100644
index 0000000000..d7ea151a5d
--- /dev/null
+++ b/meta/classes/debian.bbclass
@@ -0,0 +1,125 @@
1# Debian package renaming only occurs when a package is built
2# We therefore have to make sure we build all runtime packages
3# before building the current package, to make sure the package's runtime
4# dependencies are correct.
5#
6# Custom library package names can be defined setting
7# DEBIANNAME_ + pkgname to the desired name.
8#
9# Better expressed as: ensure all RDEPENDS packages are packaged before we package this one.
10# This means we can't have circular RDEPENDS/RRECOMMENDS
11DEBIANRDEP = "do_packagedata"
12do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
13do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
14do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
15do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
16
17python () {
18 if not d.getVar("PACKAGES", True):
19 d.setVar("DEBIANRDEP", "")
20}
21
22python debian_package_name_hook () {
23 import glob, copy, stat, errno, re
24
25 pkgdest = d.getVar('PKGDEST', True)
26 packages = d.getVar('PACKAGES', True)
27 bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
28 lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
29 so_re = re.compile("lib.*\.so")
30
31 def socrunch(s):
32 s = s.lower().replace('_', '-')
33 m = re.match("^(.*)(.)\.so\.(.*)$", s)
34 if m is None:
35 return None
36 if m.group(2) in '0123456789':
37 bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
38 else:
39 bin = m.group(1) + m.group(2) + m.group(3)
40 dev = m.group(1) + m.group(2)
41 return (bin, dev)
42
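    # Example (illustrative): socrunch("libncurses.so.5") returns
    # ("libncurses5", "libncurses"), while socrunch("libfoo2.so.1.3") returns
    # ("libfoo2-1.3", "libfoo2"), since a digit before ".so" gets a dash
    # inserted between name and version.
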
43 def isexec(path):
44 try:
45 s = os.stat(path)
46 except (os.error, AttributeError):
47 return 0
48 return (s[stat.ST_MODE] & stat.S_IEXEC)
49
50 def auto_libname(packages, orig_pkg):
51 sonames = []
52 has_bins = 0
53 has_libs = 0
54 for file in pkgfiles[orig_pkg]:
55 root = os.path.dirname(file)
56 if bin_re.match(root):
57 has_bins = 1
58 if lib_re.match(root):
59 has_libs = 1
60 if so_re.match(os.path.basename(file)):
61 cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null"
62 fd = os.popen(cmd)
63 lines = fd.readlines()
64 fd.close()
65 for l in lines:
66 m = re.match("\s+SONAME\s+([^\s]*)", l)
67 if m and not m.group(1) in sonames:
68 sonames.append(m.group(1))
69
70 bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
71 soname = None
72 if len(sonames) == 1:
73 soname = sonames[0]
74 elif len(sonames) > 1:
75 lead = d.getVar('LEAD_SONAME', True)
76 if lead:
77 r = re.compile(lead)
78 filtered = []
79 for s in sonames:
80 if r.match(s):
81 filtered.append(s)
82 if len(filtered) == 1:
83 soname = filtered[0]
84 elif len(filtered) > 1:
85 bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
86 else:
87 bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
88 else:
89 bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
90
91 if has_libs and not has_bins and soname:
92 soname_result = socrunch(soname)
93 if soname_result:
94 (pkgname, devname) = soname_result
95 for pkg in packages.split():
96                    if (d.getVar('PKG_' + pkg, True) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, True)):
97                        continue
98                    debian_pn = d.getVar('DEBIANNAME_' + pkg, True)
99 if debian_pn:
100 newpkg = debian_pn
101 elif pkg == orig_pkg:
102 newpkg = pkgname
103 else:
104 newpkg = pkg.replace(orig_pkg, devname, 1)
105 mlpre=d.getVar('MLPREFIX', True)
106 if mlpre:
107 if not newpkg.find(mlpre) == 0:
108 newpkg = mlpre + newpkg
109 if newpkg != pkg:
110 d.setVar('PKG_' + pkg, newpkg)
111
112 # reversed sort is needed when some package is substring of another
113 # ie in ncurses we get without reverse sort:
114 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
115 # and later
116 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
117 # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
118 for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
119 auto_libname(packages, pkg)
120}
121
122EXPORT_FUNCTIONS package_name_hook
123
124DEBIAN_NAMES = "1"
125
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
new file mode 100644
index 0000000000..c3371421d8
--- /dev/null
+++ b/meta/classes/deploy.bbclass
@@ -0,0 +1,11 @@
1DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
2SSTATETASKS += "do_deploy"
3do_deploy[sstate-name] = "deploy"
4do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
5do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
6
7python do_deploy_setscene () {
8 sstate_setscene(d)
9}
10addtask do_deploy_setscene
11do_deploy[dirs] = "${DEPLOYDIR} ${B}"
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
new file mode 100644
index 0000000000..92edb9ef25
--- /dev/null
+++ b/meta/classes/devshell.bbclass
@@ -0,0 +1,33 @@
1inherit terminal
2
3DEVSHELL = "${SHELL}"
4
5python do_devshell () {
6 if d.getVarFlag("do_devshell", "manualfakeroot"):
7 d.prependVar("DEVSHELL", "pseudo ")
8 fakeenv = d.getVar("FAKEROOTENV", True).split()
9 for f in fakeenv:
10 k = f.split("=")
11 d.setVar(k[0], k[1])
12 d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
13 d.delVarFlag("do_devshell", "fakeroot")
14
15 oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d)
16}
17
18addtask devshell after do_patch
19
20do_devshell[dirs] = "${S}"
21do_devshell[nostamp] = "1"
22
23# devshell and fakeroot/pseudo need careful handling since only the final
24# command should run under fakeroot emulation, any X connection should
25# be done as the normal user. We therefore carefully construct the environment
26# manually
27python () {
28 if d.getVarFlag("do_devshell", "fakeroot"):
29 # We need to signal our code that we want fakeroot however we
30 # can't manipulate the environment and variables here yet (see YOCTO #4795)
31 d.setVarFlag("do_devshell", "manualfakeroot", "1")
32 d.delVarFlag("do_devshell", "fakeroot")
33}
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
new file mode 100644
index 0000000000..61b11b7d53
--- /dev/null
+++ b/meta/classes/distro_features_check.bbclass
@@ -0,0 +1,28 @@
1# Allow checking of required and conflicting DISTRO_FEATURES
2#
3# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
4# in DISTRO_FEATURES.
5# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
6# DISTRO_FEATURES.
7#
8# Copyright 2013 (C) O.S. Systems Software LTDA.
9
10python () {
11 required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True)
12 if required_distro_features:
13 required_distro_features = required_distro_features.split()
14 distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
15 for f in required_distro_features:
16            if f not in distro_features:
17                raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
20
21 conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True)
22 if conflict_distro_features:
23 conflict_distro_features = conflict_distro_features.split()
24 distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
25 for f in conflict_distro_features:
26 if f in distro_features:
27                raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
28}
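
# A minimal usage sketch (hypothetical recipe):
#
#   inherit distro_features_check
#   REQUIRED_DISTRO_FEATURES = "x11 opengl"
#   CONFLICT_DISTRO_FEATURES = "directfb"
#
# With the checks above, such a recipe is skipped at parse time unless every
# required feature is in DISTRO_FEATURES and no conflicting feature is.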
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
new file mode 100644
index 0000000000..085575a041
--- /dev/null
+++ b/meta/classes/distrodata.bbclass
@@ -0,0 +1,925 @@
1include conf/distro/include/package_regex.inc
2addhandler distro_eventhandler
3distro_eventhandler[eventmask] = "bb.event.BuildStarted"
4python distro_eventhandler() {
5 import oe.distro_check as dc
6 logfile = dc.create_log_file(e.data, "distrodata.csv")
7 lf = bb.utils.lockfile("%s.lock" % logfile)
8 f = open(logfile, "a")
9    f.write("Package,Description,Owner,License,VerMatch,Version,Upstream,Reason,Recipe Status,Distro 1,Distro 2,Distro 3\n")
10 f.close()
11 bb.utils.unlockfile(lf)
12
13 return
14}
15
16addtask distrodata_np
17do_distrodata_np[nostamp] = "1"
18python do_distrodata_np() {
19 localdata = bb.data.createCopy(d)
20 pn = d.getVar("PN", True)
21 bb.note("Package Name: %s" % pn)
22
23 import oe.distro_check as dist_check
24 tmpdir = d.getVar('TMPDIR', True)
25 distro_check_dir = os.path.join(tmpdir, "distro_check")
26 datetime = localdata.getVar('DATETIME', True)
27 dist_check.update_distro_data(distro_check_dir, datetime)
28
29 if pn.find("-native") != -1:
30 pnstripped = pn.split("-native")
31 bb.note("Native Split: %s" % pnstripped)
32 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
33 bb.data.update_data(localdata)
34
35 if pn.find("-cross") != -1:
36 pnstripped = pn.split("-cross")
37 bb.note("cross Split: %s" % pnstripped)
38 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
39 bb.data.update_data(localdata)
40
41 if pn.find("-crosssdk") != -1:
42 pnstripped = pn.split("-crosssdk")
43 bb.note("cross Split: %s" % pnstripped)
44 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
45 bb.data.update_data(localdata)
46
47 if pn.startswith("nativesdk-"):
48 pnstripped = pn.replace("nativesdk-", "")
49 bb.note("NativeSDK Split: %s" % pnstripped)
50 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
51 bb.data.update_data(localdata)
52
53
54 if pn.find("-initial") != -1:
55 pnstripped = pn.split("-initial")
56 bb.note("initial Split: %s" % pnstripped)
57 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
58 bb.data.update_data(localdata)
59
60    # generate package information from .bb file
61 pname = localdata.getVar('PN', True)
62 pcurver = localdata.getVar('PV', True)
63 pdesc = localdata.getVar('DESCRIPTION', True)
64 if pdesc is not None:
65 pdesc = pdesc.replace(',','')
66 pdesc = pdesc.replace('\n','')
67
68 pgrp = localdata.getVar('SECTION', True)
69 plicense = localdata.getVar('LICENSE', True).replace(',','_')
70
71 rstatus = localdata.getVar('RECIPE_COLOR', True)
72 if rstatus is not None:
73 rstatus = rstatus.replace(',','')
74
75 pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
76 if pcurver == pupver:
77 vermatch="1"
78 else:
79 vermatch="0"
80 noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
81 if noupdate_reason is None:
82 noupdate="0"
83 else:
84 noupdate="1"
85 noupdate_reason = noupdate_reason.replace(',','')
86
87 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
88 rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
89 result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
90
91 bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
92 (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
93 line = pn
94 for i in result:
95 line = line + "," + i
96 bb.note("%s\n" % line)
97}
98
99addtask distrodata
100do_distrodata[nostamp] = "1"
101python do_distrodata() {
102 logpath = d.getVar('LOG_DIR', True)
103 bb.utils.mkdirhier(logpath)
104 logfile = os.path.join(logpath, "distrodata.csv")
105
106 import oe.distro_check as dist_check
107 localdata = bb.data.createCopy(d)
108 tmpdir = d.getVar('TMPDIR', True)
109 distro_check_dir = os.path.join(tmpdir, "distro_check")
110 datetime = localdata.getVar('DATETIME', True)
111 dist_check.update_distro_data(distro_check_dir, datetime)
112
113 pn = d.getVar("PN", True)
114 bb.note("Package Name: %s" % pn)
115
116 if pn.find("-native") != -1:
117 pnstripped = pn.split("-native")
118 bb.note("Native Split: %s" % pnstripped)
119 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
120 bb.data.update_data(localdata)
121
122 if pn.startswith("nativesdk-"):
123 pnstripped = pn.replace("nativesdk-", "")
124 bb.note("NativeSDK Split: %s" % pnstripped)
125 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
126 bb.data.update_data(localdata)
127
128 if pn.find("-cross") != -1:
129 pnstripped = pn.split("-cross")
130 bb.note("cross Split: %s" % pnstripped)
131 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
132 bb.data.update_data(localdata)
133
134 if pn.find("-crosssdk") != -1:
135 pnstripped = pn.split("-crosssdk")
136        bb.note("crosssdk Split: %s" % pnstripped)
137 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
138 bb.data.update_data(localdata)
139
140 if pn.find("-initial") != -1:
141 pnstripped = pn.split("-initial")
142 bb.note("initial Split: %s" % pnstripped)
143 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
144 bb.data.update_data(localdata)
145
146 """generate package information from .bb file"""
147 pname = localdata.getVar('PN', True)
148 pcurver = localdata.getVar('PV', True)
149 pdesc = localdata.getVar('DESCRIPTION', True)
150 if pdesc is not None:
151 pdesc = pdesc.replace(',','')
152 pdesc = pdesc.replace('\n','')
153
154 pgrp = localdata.getVar('SECTION', True)
155 plicense = localdata.getVar('LICENSE', True).replace(',','_')
156
157 rstatus = localdata.getVar('RECIPE_COLOR', True)
158 if rstatus is not None:
159 rstatus = rstatus.replace(',','')
160
161 pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
162 if pcurver == pupver:
163 vermatch="1"
164 else:
165 vermatch="0"
166
167 noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
168 if noupdate_reason is None:
169 noupdate="0"
170 else:
171 noupdate="1"
172 noupdate_reason = noupdate_reason.replace(',','')
173
174 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
175 rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
176 # do the comparison
177 result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
178
179 lf = bb.utils.lockfile("%s.lock" % logfile)
180 f = open(logfile, "a")
181 f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s" % \
182 (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
183 line = ""
184 for i in result:
185 line = line + "," + i
186 f.write(line + "\n")
187 f.close()
188 bb.utils.unlockfile(lf)
189}
190
191addtask distrodataall after do_distrodata
192do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
193do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}"
194do_distrodataall[nostamp] = "1"
195do_distrodataall() {
196 :
197}
198
199addhandler checkpkg_eventhandler
200checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
201python checkpkg_eventhandler() {
202 def parse_csv_file(filename):
203 package_dict = {}
204 fd = open(filename, "r")
205 lines = fd.read().rsplit("\n")
206 fd.close()
207
208 first_line = ''
209 index = 0
210 for line in lines:
211 #Skip the first line
212 if index == 0:
213 first_line = line
214 index += 1
215 continue
216 elif line == '':
217 continue
218 index += 1
219 package_name = line.rsplit("\t")[0]
220 if '-native' in package_name or 'nativesdk-' in package_name:
221 original_name = package_name.rsplit('-native')[0]
222 if original_name == '':
223 original_name = package_name.rsplit('nativesdk-')[0]
224 if original_name in package_dict:
225 continue
226 else:
227 package_dict[package_name] = line
228 else:
229 new_name = package_name + "-native"
230                if new_name not in package_dict:
231 new_name = 'nativesdk-' + package_name
232 if new_name in package_dict:
233 del package_dict[new_name]
234 package_dict[package_name] = line
235
236 fd = open(filename, "w")
237 fd.write("%s\n"%first_line)
238 for el in package_dict:
239 fd.write(package_dict[el] + "\n")
240 fd.close()
241
242 del package_dict
243
244 if bb.event.getName(e) == "BuildStarted":
245 import oe.distro_check as dc
246 logfile = dc.create_log_file(e.data, "checkpkg.csv")
247
248 lf = bb.utils.lockfile("%s.lock" % logfile)
249 f = open(logfile, "a")
250 f.write("Package\tVersion\tUpver\tLicense\tSection\tHome\tRelease\tDepends\tBugTracker\tPE\tDescription\tStatus\tTracking\tURI\tMAINTAINER\tNoUpReason\n")
251 f.close()
252 bb.utils.unlockfile(lf)
253 elif bb.event.getName(e) == "BuildCompleted":
254 import os
255 filename = "tmp/log/checkpkg.csv"
256 if os.path.isfile(filename):
257 lf = bb.utils.lockfile("%s.lock"%filename)
258 parse_csv_file(filename)
259 bb.utils.unlockfile(lf)
260 return
261}
262
263addtask checkpkg
264do_checkpkg[nostamp] = "1"
265python do_checkpkg() {
266 localdata = bb.data.createCopy(d)
267 import re
268 import tempfile
269 import subprocess
270
271 """
272 sanity check to ensure same name and type. Match as many patterns as possible
273 such as:
274 gnome-common-2.20.0.tar.gz (most common format)
275 gtk+-2.90.1.tar.gz
276 xf86-input-synaptics-12.6.9.tar.gz
277 dri2proto-2.3.tar.gz
278 blktool_4.orig.tar.gz
279 libid3tag-0.15.1b.tar.gz
280 unzip552.tar.gz
281 icu4c-3_6-src.tgz
282 genext2fs_1.3.orig.tar.gz
283 gst-fluendo-mp3
284 """
285 prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*\+?[\-_]" # match most patterns which uses "-" as separator to version digits
286 prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
287 prefix3 = "[0-9]+[\-]?[a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
288 prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
289 ver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"#"((\d+[\.\-_[a-z]])+)"
290    # The src.rpm extension was added only for rpm packages. It can be removed if rpm
291    # packages will always be considered as having to be manually upgraded
292 suffix = "(tar\.gz|tgz|tar\.bz2|zip|xz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
293
294    suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "bz2", "orig.tar.gz", "src.tar.gz", "src.rpm", "src.tgz", "svnr\d+.tar.bz2", "stable.tar.gz")
295 sinterstr = "(?P<name>%s?)v?(?P<ver>%s)(\-source)?" % (prefix, ver_regex)
296 sdirstr = "(?P<name>%s)\.?v?(?P<ver>%s)(\-source)?[\.\-](?P<type>%s$)" % (prefix, ver_regex, suffix)
297
298 def parse_inter(s):
299 m = re.search(sinterstr, s)
300 if not m:
301 return None
302 else:
303 return (m.group('name'), m.group('ver'), "")
304
305 def parse_dir(s):
306 m = re.search(sdirstr, s)
307 if not m:
308 return None
309 else:
310 return (m.group('name'), m.group('ver'), m.group('type'))
311
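    # For illustration, with the patterns above these helpers should yield roughly:
    #   parse_dir("gnome-common-2.20.0.tar.gz") -> ("gnome-common-", "2.20.0", "tar.gz")
    #   parse_inter("2.20")                     -> ("", "2.20", "")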
312 def modelate_version(version):
313 if version[0] in ['.', '-']:
314 if version[1].isdigit():
315 version = version[1] + version[0] + version[2:len(version)]
316 else:
317 version = version[1:len(version)]
318
319 version = re.sub('\-', '.', version)
320 version = re.sub('_', '.', version)
321 version = re.sub('(rc)+', '.-1.', version)
322 version = re.sub('(alpha)+', '.-3.', version)
323 version = re.sub('(beta)+', '.-2.', version)
324 if version[0] == 'v':
325 version = version[1:len(version)]
326 return version
327
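    # For example, modelate_version() normalizes versions roughly as follows,
    # so that pre-releases sort below final versions in the vercmp below:
    #   "2_4-rc1"  -> "2.4..-1.1"
    #   "1.0beta2" -> "1.0.-2.2"
    #   "v0.9.8"   -> "0.9.8"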
328 """
329    Check whether the 'new' version is newer than the 'old' one, using the existing vercmp().
330    PE is cleared in the comparison as it has no meaning upstream, and PR is cleared too
331    for simplicity, as it is difficult to derive from the various upstream formats
332 """
333 def __vercmp(old, new):
334 (on, ov, ot) = old
335 (en, ev, et) = new
336 if on != en or (et and et not in suffixtuple):
337 return False
338 ov = modelate_version(ov)
339 ev = modelate_version(ev)
340
341 result = bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
342 if result < 0:
343 return True
344 else:
345 return False
346
347 """
348    Wrapper for fetching upstream directory info
349       'url'  - upstream link customized by regular expression
350       'd'    - datastore
351       'tmpf' - tmpfile for fetcher output
352    We don't want one recipe error to abort the whole build, so handle all exceptions
353    gracefully without leaking them to the caller.
354 """
355 def internal_fetch_wget(url, d, tmpf):
356 status = "ErrFetchUnknown"
357 """
358        Clear the internal url cache, as this is only a temporary check. Not doing so would
359        have bitbake check the url multiple times when looping through a single url
360 """
361 fn = d.getVar('FILE', True)
362 bb.fetch2.urldata_cache[fn] = {}
363
364 """
365        To avoid impacting the bitbake build engine, this trick is required for reusing bitbake
366        interfaces. bb.fetch.go() is not applicable as it checks the downloaded content in ${DL_DIR},
367        which we don't want to pollute. So bb.fetch2.checkstatus() is borrowed here: it is
368        designed for checking, and we override its check command for our own purpose
369 """
370 ld = bb.data.createCopy(d)
371        ld.setVar('CHECKCOMMAND_wget', "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12\" '${URI}'" \
372 % tmpf.name)
373 bb.data.update_data(ld)
374
375 try:
376 fetcher = bb.fetch2.Fetch([url], ld)
377 fetcher.checkstatus()
378 status = "SUCC"
379 except bb.fetch2.BBFetchException, e:
380 status = "ErrFetch"
381
382 return status
383
384 """
385 Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
386 'url' - upstream link customized by regular expression
387    'd' - datastore
388 'curver' - current version
389 Return new version if success, or else error in "Errxxxx" style
390 """
391 def check_new_dir(url, curver, d):
392 pn = d.getVar('PN', True)
393 f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
394 status = internal_fetch_wget(url, d, f)
395 fhtml = f.read()
396 if status == "SUCC" and len(fhtml):
397 newver = parse_inter(curver)
398
399 """
400            match "*4.1/">*4.1/ where '*' matches any characters
401            N.B. the package name is included, and only digits are matched for the version
402 """
403 regex = d.getVar('REGEX', True)
404        if not regex:
405 regex = "^%s" %prefix
406 m = re.search("^%s" % regex, curver)
407 if m:
408 s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
409 else:
410 s = "(\d+[\.\-_])+\d+/?"
411
412 searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
413
414 reg = re.compile(searchstr)
415 valid = 0
416 for line in fhtml.split("\n"):
417 if line.find(curver) >= 0:
418 valid = 1
419 m = reg.search(line)
420 if m:
421 ver = m.group().split("\"")[1]
422 ver = ver.strip("/")
423 ver = parse_inter(ver)
424 if ver and __vercmp(newver, ver) == True:
425 newver = ver
426
427 """Expect a match for curver in directory list, or else it indicates unknown format"""
428 if not valid:
429 status = "ErrParseInterDir"
430 else:
431 """rejoin the path name"""
432 status = newver[0] + newver[1]
433 elif not len(fhtml):
434 status = "ErrHostNoDir"
435
436 f.close()
437 if status != "ErrHostNoDir" and re.match("Err", status):
438 logpath = d.getVar('LOG_DIR', True)
439 subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
440 os.unlink(f.name)
441 return status
442
443 """
444 Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
445 'url' - upstream link customized by regular expression
446    'd' - datastore
447 'curname' - current package name
448 Return new version if success, or else error in "Errxxxx" style
449 """
450 def check_new_version(url, curname, d):
451 """possible to have no version in pkg name, such as spectrum-fw"""
452 if not re.search("\d+", curname):
453 return pcurver
454 pn = d.getVar('PN', True)
455 newver_regex = d.getVar('REGEX', True)
456 f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
457 status = internal_fetch_wget(url, d, f)
458 fhtml = f.read()
459
460 if status == "SUCC" and len(fhtml):
461 newver = parse_dir(curname)
462
463 if not newver_regex:
464            """This is the default matching pattern, used if the recipe
465               does not provide a regular expression.
466               Match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz            """
467 pn1 = re.search("^%s" % prefix, curname).group()
468 s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
469 searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
470 reg = searchstr
471 else:
472 reg = newver_regex
473 valid = 0
474 count = 0
475 for line in fhtml.split("\n"):
476 if pn == 'kconfig-frontends':
477 m = re.findall(reg, line)
478 if m:
479 valid = 1
480 for match in m:
481 (on, ov, oe) = newver
482 ver = (on, match[0], oe)
483 if ver and __vercmp(newver, ver) == True:
484 newver = ver
485 continue
486 count += 1
487 m = re.search(reg, line)
488 if m:
489 valid = 1
490 if not newver_regex:
491 ver = m.group().split("\"")[1].split("/")[-1]
492 if ver == "download":
493 ver = m.group().split("\"")[1].split("/")[-2]
494 ver = parse_dir(ver)
495 else:
496 """ we cheat a little here, but we assume that the
497                        regular expression in the recipe will extract exactly
498 the version """
499 (on, ov, oe) = newver
500 ver = (on, m.group('pver'), oe)
501 if ver and __vercmp(newver, ver) == True:
502 newver = ver
503 """Expect a match for curver in directory list, or else it indicates unknown format"""
504 if not valid:
505 status = "ErrParseDir"
506 else:
507 """newver still contains a full package name string"""
508 status = re.sub('_', '.', newver[1])
509 elif not len(fhtml):
510 status = "ErrHostNoDir"
511
512 f.close()
513        """if the host has no directory information, there is no need to save the tmp file"""
514 if status != "ErrHostNoDir" and re.match("Err", status):
515 logpath = d.getVar('LOG_DIR', True)
516 subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
517 os.unlink(f.name)
518 return status
519
520 """first check whether a uri is provided"""
521 src_uri = d.getVar('SRC_URI', True)
522 if not src_uri:
523 return
524
525 """initialize log files."""
526 logpath = d.getVar('LOG_DIR', True)
527 bb.utils.mkdirhier(logpath)
528 logfile = os.path.join(logpath, "checkpkg.csv")
529
530 """generate package information from .bb file"""
531 pname = d.getVar('PN', True)
532
533 if pname.find("-native") != -1:
534 if d.getVar('BBCLASSEXTEND', True):
535 return
536 pnstripped = pname.split("-native")
537 bb.note("Native Split: %s" % pnstripped)
538 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
539 bb.data.update_data(localdata)
540
541 if pname.startswith("nativesdk-"):
542 if d.getVar('BBCLASSEXTEND', True):
543 return
544 pnstripped = pname.replace("nativesdk-", "")
545 bb.note("NativeSDK Split: %s" % pnstripped)
546 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
547 bb.data.update_data(localdata)
548
549 if pname.find("-cross") != -1:
550 pnstripped = pname.split("-cross")
551 bb.note("cross Split: %s" % pnstripped)
552 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
553 bb.data.update_data(localdata)
554
555 if pname.find("-initial") != -1:
556 pnstripped = pname.split("-initial")
557 bb.note("initial Split: %s" % pnstripped)
558 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
559 bb.data.update_data(localdata)
560
561 chk_uri = d.getVar('REGEX_URI', True)
562 if not chk_uri:
563 chk_uri = src_uri
564 pdesc = localdata.getVar('DESCRIPTION', True)
565 pgrp = localdata.getVar('SECTION', True)
566 if localdata.getVar('PRSPV', True):
567 pversion = localdata.getVar('PRSPV', True)
568 else:
569 pversion = localdata.getVar('PV', True)
570 plicense = localdata.getVar('LICENSE', True)
571 psection = localdata.getVar('SECTION', True)
572 phome = localdata.getVar('HOMEPAGE', True)
573 prelease = localdata.getVar('PR', True)
574 pdepends = localdata.getVar('DEPENDS', True)
575 pbugtracker = localdata.getVar('BUGTRACKER', True)
576 ppe = localdata.getVar('PE', True)
577 psrcuri = localdata.getVar('SRC_URI', True)
578 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
579
580 found = 0
581 for uri in src_uri.split():
582 m = re.compile('(?P<type>[^:]*)').match(uri)
583 if not m:
584 raise MalformedUrl(uri)
585 elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
586 found = 1
587 pproto = m.group('type')
588 break
589 if not found:
590 pproto = "file"
591 pupver = "N/A"
592 pstatus = "ErrUnknown"
593
594 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(uri)
595 if type in ['http', 'https', 'ftp']:
596 if d.getVar('PRSPV', True):
597 pcurver = d.getVar('PRSPV', True)
598 else:
599 pcurver = d.getVar('PV', True)
600 else:
601 if d.getVar('PRSPV', True):
602 pcurver = d.getVar('PRSPV', True)
603 else:
604 pcurver = d.getVar("SRCREV", True)
605
606
607 if type in ['http', 'https', 'ftp']:
608 newver = pcurver
609 altpath = path
610 dirver = "-"
611 curname = "-"
612
613 """
614 match version number amid the path, such as "5.7" in:
615 http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
616    N.B. what about something like "../5.7/5.8/..."? No such example found so far :-P
617 """
618 m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
619 if m:
620 altpath = path.split(m.group())[0]
621 dirver = m.group().strip("/")
622
623        """use the new path and remove params; for wget the only param is md5sum"""
624 alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
625 my_uri = d.getVar('REGEX_URI', True)
626 if my_uri:
627 if d.getVar('PRSPV', True):
628 newver = d.getVar('PRSPV', True)
629 else:
630 newver = d.getVar('PV', True)
631 else:
632 newver = check_new_dir(alturi, dirver, d)
633 altpath = path
634 if not re.match("Err", newver) and dirver != newver:
635 altpath = altpath.replace(dirver, newver, True)
636 # For folder in folder cases - try to enter the folder again and then try parsing
637 """Now try to acquire all remote files in current directory"""
638 if not re.match("Err", newver):
639 curname = altpath.split("/")[-1]
640
641            """get remote name by skipping package name"""
642 m = re.search(r"/.*/", altpath)
643 if not m:
644 altpath = "/"
645 else:
646 altpath = m.group()
647
648 chk_uri = d.getVar('REGEX_URI', True)
649 if not chk_uri:
650 alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
651 else:
652 alturi = chk_uri
653 newver = check_new_version(alturi, curname, d)
654            while newver == "ErrHostNoDir":
655 if alturi == "/download":
656 break
657 else:
658 alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
659 newver = check_new_version(alturi, curname, d)
660 if not re.match("Err", newver):
661 pupver = newver
662 if pupver != pcurver:
663 pstatus = "UPDATE"
664 else:
665 pstatus = "MATCH"
666
667 if re.match("Err", newver):
668 pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
669 elif type == 'git':
670 if user:
671 gituser = user + '@'
672 else:
673 gituser = ""
674
675 if 'protocol' in parm:
676 gitproto = parm['protocol']
677 else:
678 gitproto = "git"
679
680 # Get all tags and HEAD
681 if d.getVar('GIT_REGEX', True):
682 gitcmd = "git ls-remote %s://%s%s%s %s 2>&1" % (gitproto, gituser, host, path, d.getVar('GIT_REGEX', True))
683 else:
684 gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
685 gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
686
687 tmp = os.popen(gitcmd).read()
688 if 'unable to connect' in tmp:
689 tmp = None
690 tmp2 = os.popen(gitcmd2).read()
691 if 'unable to connect' in tmp2:
692 tmp2 = None
693 #This is for those repos have tag like: refs/tags/1.2.2
694 phash = pversion.rsplit("+")[-1]
695 if tmp:
696 tmpline = tmp.split("\n")
697 verflag = 0
698 pupver = pversion
699 for line in tmpline:
700 if len(line)==0:
701                    break
702 puptag = line.split("/")[-1]
703 upstr_regex = d.getVar('REGEX', True)
704 if upstr_regex:
705 puptag = re.search(upstr_regex, puptag)
706 else:
707 puptag = re.search("(?P<pver>([0-9][\.|_]?)+)", puptag)
708 if puptag == None:
709 continue
710 puptag = puptag.group('pver')
711 puptag = re.sub("_",".",puptag)
712 plocaltag = pupver.split("+git")[0]
713 if "git" in plocaltag:
714 plocaltag = plocaltag.split("-")[0]
715 result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
716
717 if result > 0:
718 verflag = 1
719 pupver = puptag
720 elif verflag == 0 :
721 pupver = plocaltag
722 #This is for those no tag repo
723 elif tmp2:
724 pupver = pversion.rsplit("+")[0]
725 phash = pupver
726 else:
727 pstatus = "ErrGitAccess"
728 if not ('ErrGitAccess' in pstatus):
729
730 latest_head = tmp2.rsplit("\t")[0][:7]
731 tmp3 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pversion)
732 tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pupver)
733 if not tmp4:
734 tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)', pupver)
735
736                if tmp3 and tmp4:
737                    # Get status of the package - MATCH/UPDATE
738                    result = bb.utils.vercmp(("0", tmp3.group('git_ver'), ""), ("0", tmp4.group('git_ver'), ""))
739 # Get the latest tag
740 pstatus = 'MATCH'
741 if result < 0:
742 latest_pv = tmp3.group('git_ver')
743 else:
744 latest_pv = pupver
745 if not(tmp3.group('head_md5')[:7] in latest_head) or not(latest_head in tmp3.group('head_md5')[:7]):
746 pstatus = 'UPDATE'
747
748 git_prefix = tmp3.group('git_prefix')
749 pupver = latest_pv + tmp3.group('git_prefix') + latest_head
750 else:
751 if not tmp3:
752 bb.plain("#DEBUG# Package %s: current version (%s) doesn't match the usual pattern" %(pname, pversion))
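        # To illustrate the tag handling above: `git ls-remote <url> *tag*` prints
        # lines like "<sha>\trefs/tags/1.2.2", so puptag becomes "1.2.2" (or whatever
        # the recipe's REGEX extracts), which is then compared against the local tag
        # part of PV via bb.utils.vercmp().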
753 elif type == 'svn':
754 options = []
755 if user:
756 options.append("--username %s" % user)
757 if pswd:
758 options.append("--password %s" % pswd)
759 svnproto = 'svn'
760 if 'proto' in parm:
761 svnproto = parm['proto']
762 if 'rev' in parm:
763 pcurver = parm['rev']
764
765 svncmd = "svn info %s %s://%s%s/%s/ 2>&1" % (" ".join(options), svnproto, host, path, parm["module"])
766        bb.note(svncmd)
767 svninfo = os.popen(svncmd).read()
768 if "Can't connect to host " in svninfo or "Connection timed out" in svninfo:
769 svncmd = "svn info %s %s://%s%s/%s/ 2>&1" % (" ".join(options), "http",
770 host, path, parm["module"])
771 svninfo = os.popen(svncmd).read()
772 for line in svninfo.split("\n"):
773 if re.search("^Last Changed Rev:", line):
774 pupver = line.split(" ")[-1]
775 if pupver in pversion:
776 pstatus = "MATCH"
777 else:
778 pstatus = "UPDATE"
779
780 if re.match("Err", pstatus):
781 pstatus = "ErrSvnAccess"
782
783 if pstatus != "ErrSvnAccess":
784 tag = pversion.rsplit("+svn")[0]
785 svn_prefix = re.search('(\+svn[r|\-]?)', pversion)
786 if tag and svn_prefix:
787 pupver = tag + svn_prefix.group() + pupver
788
789 elif type == 'cvs':
790 pupver = "HEAD"
791 pstatus = "UPDATE"
792 elif type == 'file':
793 """local file is always up-to-date"""
794 pupver = pcurver
795 pstatus = "MATCH"
796 else:
797 pstatus = "ErrUnsupportedProto"
798
799 if re.match("Err", pstatus):
800 pstatus += ":%s%s" % (host, path)
801
802 """Read from manual distro tracking fields as alternative"""
803 pmver = d.getVar("RECIPE_UPSTREAM_VERSION", True)
804 if not pmver:
805 pmver = "N/A"
806 pmstatus = "ErrNoRecipeData"
807 else:
808 if pmver == pcurver:
809 pmstatus = "MATCH"
810 else:
811 pmstatus = "UPDATE"
812
813 psrcuri = psrcuri.split()[0]
814 pdepends = "".join(pdepends.split("\t"))
815 pdesc = "".join(pdesc.split("\t"))
816 no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
817 lf = bb.utils.lockfile("%s.lock" % logfile)
818 f = open(logfile, "a")
819 f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \
820 (pname,pversion,pupver,plicense,psection, phome,prelease, pdepends,pbugtracker,ppe,pdesc,pstatus,pmver,psrcuri,maintainer, no_upgr_reason))
821 f.close()
822 bb.utils.unlockfile(lf)
823}
824
825addtask checkpkgall after do_checkpkg
826do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg"
827do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}"
828do_checkpkgall[nostamp] = "1"
829do_checkpkgall() {
830 :
831}
832
833addhandler distro_check_eventhandler
834distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
835python distro_check_eventhandler() {
836 """initialize log files."""
837 import oe.distro_check as dc
838 result_file = dc.create_log_file(e.data, "distrocheck.csv")
839 return
840}
841
842addtask distro_check
843do_distro_check[nostamp] = "1"
844python do_distro_check() {
845 """checks if the package is present in other public Linux distros"""
846 import oe.distro_check as dc
847 import shutil
848 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
849 return
850
851 localdata = bb.data.createCopy(d)
852 bb.data.update_data(localdata)
853 tmpdir = d.getVar('TMPDIR', True)
854 distro_check_dir = os.path.join(tmpdir, "distro_check")
855 logpath = d.getVar('LOG_DIR', True)
856 bb.utils.mkdirhier(logpath)
857 result_file = os.path.join(logpath, "distrocheck.csv")
858 datetime = localdata.getVar('DATETIME', True)
859 dc.update_distro_data(distro_check_dir, datetime)
860
861 # do the comparison
862 result = dc.compare_in_distro_packages_list(distro_check_dir, d)
863
864 # save the results
865 dc.save_distro_check_result(result, datetime, result_file, d)
866}
867
868addtask distro_checkall after do_distro_check
869do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check"
870do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}"
871do_distro_checkall[nostamp] = "1"
872do_distro_checkall() {
873 :
874}
875#
876#Check Missing License Text.
877#Use this task to generate the missing-license-text data for the pkg-report system,
878#so we can find those recipes whose license text does not exist in the common-licenses directory
879#
880addhandler checklicense_eventhandler
881checklicense_eventhandler[eventmask] = "bb.event.BuildStarted"
882python checklicense_eventhandler() {
883 """initialize log files."""
884 import oe.distro_check as dc
885 logfile = dc.create_log_file(e.data, "missinglicense.csv")
886 lf = bb.utils.lockfile("%s.lock" % logfile)
887 f = open(logfile, "a")
888 f.write("Package\tLicense\tMissingLicense\n")
889 f.close()
890 bb.utils.unlockfile(lf)
891 return
892}
893
894addtask checklicense
895do_checklicense[nostamp] = "1"
896python do_checklicense() {
897 import shutil
898 logpath = d.getVar('LOG_DIR', True)
899 bb.utils.mkdirhier(logpath)
900 pn = d.getVar('PN', True)
901 logfile = os.path.join(logpath, "missinglicense.csv")
902 generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
903 license_types = d.getVar('LICENSE', True)
904 for license_type in ((license_types.replace('+', '').replace('|', '&')
905 .replace('(', '').replace(')', '').replace(';', '')
906 .replace(',', '').replace(" ", "").split("&"))):
907 if not os.path.isfile(os.path.join(generic_directory, license_type)):
908 lf = bb.utils.lockfile("%s.lock" % logfile)
909 f = open(logfile, "a")
910 f.write("%s\t%s\t%s\n" % \
911 (pn,license_types,license_type))
912 f.close()
913 bb.utils.unlockfile(lf)
914 return
915}
916
917addtask checklicenseall after do_checklicense
918do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense"
919do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}"
920do_checklicenseall[nostamp] = "1"
921do_checklicenseall() {
922 :
923}
924
925
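As a rough, standalone illustration of the approach do_checkpkg takes above (fetch an index page, collect version-looking hrefs, keep the highest according to a version compare), here is a minimal Python 2 sketch; the index URL and the simplified regex are assumptions for illustration, not the class's actual code path:

    import re
    import urllib2

    def best_version(index_html, pkg):
        # find hrefs of the form "<pkg>-<digits separated by . _ ->.tar.*"
        pat = re.compile(r'href="%s-((?:\d+[._-])*\d+)\.tar\.[^"]*"' % re.escape(pkg))
        best = None
        for ver in pat.findall(index_html):
            # numeric, component-wise comparison stands in for bb.utils.vercmp()
            key = [int(x) for x in re.split(r'[._-]', ver)]
            if best is None or key > best[0]:
                best = (key, ver)
        return best[1] if best else None

    # hypothetical autoindex-style release page
    html = urllib2.urlopen("http://example.org/releases/").read()
    print best_version(html, "gnome-common")
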
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
new file mode 100644
index 0000000000..3b43e7629f
--- /dev/null
+++ b/meta/classes/distutils-base.bbclass
@@ -0,0 +1,5 @@
1DEPENDS += "${@["python-native python", ""][(d.getVar('PACKAGES', True) == '')]}"
2RDEPENDS_${PN} += "${@['', 'python-core']['${CLASSOVERRIDE}' == 'class-target']}"
3
4inherit distutils-common-base pythonnative
5
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
new file mode 100644
index 0000000000..9a608eb63e
--- /dev/null
+++ b/meta/classes/distutils-common-base.bbclass
@@ -0,0 +1,24 @@
1inherit python-dir
2
3EXTRA_OEMAKE = ""
4
5export STAGING_INCDIR
6export STAGING_LIBDIR
7
8PACKAGES = "${PN}-staticdev ${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
9
10FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
11
12FILES_${PN}-staticdev += "\
13 ${PYTHON_SITEPACKAGES_DIR}/*.a \
14"
15FILES_${PN}-dev += "\
16 ${datadir}/pkgconfig \
17 ${libdir}/pkgconfig \
18 ${PYTHON_SITEPACKAGES_DIR}/*.la \
19"
20FILES_${PN}-dbg += "\
21 ${PYTHON_SITEPACKAGES_DIR}/.debug \
22 ${PYTHON_SITEPACKAGES_DIR}/*/.debug \
23 ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
24"
diff --git a/meta/classes/distutils-native-base.bbclass b/meta/classes/distutils-native-base.bbclass
new file mode 100644
index 0000000000..ceda512e39
--- /dev/null
+++ b/meta/classes/distutils-native-base.bbclass
@@ -0,0 +1,3 @@
1DEPENDS += "${@["python-native", ""][(d.getVar('PACKAGES', True) == '')]}"
2
3inherit distutils-common-base
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
new file mode 100644
index 0000000000..8c3a979322
--- /dev/null
+++ b/meta/classes/distutils.bbclass
@@ -0,0 +1,79 @@
1inherit distutils-base
2
3DISTUTILS_BUILD_ARGS ?= ""
4DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
5DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
6 --install-data=${STAGING_DATADIR}"
7DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
8 --install-data=${D}/${datadir}"
9
10distutils_do_compile() {
11 STAGING_INCDIR=${STAGING_INCDIR} \
12 STAGING_LIBDIR=${STAGING_LIBDIR} \
13 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
14 ${STAGING_BINDIR_NATIVE}/python-native/python setup.py build ${DISTUTILS_BUILD_ARGS} || \
15        bbfatal "python setup.py build execution failed."
16}
17
18distutils_stage_headers() {
19 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
20 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
21 ${STAGING_BINDIR_NATIVE}/python-native/python setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
22 bbfatal "python setup.py install_headers execution failed."
23}
24
25distutils_stage_all() {
26 STAGING_INCDIR=${STAGING_INCDIR} \
27 STAGING_LIBDIR=${STAGING_LIBDIR} \
28 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
29 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
30 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
31 ${STAGING_BINDIR_NATIVE}/python-native/python setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
32 bbfatal "python setup.py install (stage) execution failed."
33}
34
35distutils_do_install() {
36 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
37 STAGING_INCDIR=${STAGING_INCDIR} \
38 STAGING_LIBDIR=${STAGING_LIBDIR} \
39 PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
40 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
41 ${STAGING_BINDIR_NATIVE}/python-native/python setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
42 bbfatal "python setup.py install execution failed."
43
44 for i in `find ${D} -name "*.py"` ; do \
45 sed -i -e s:${D}::g $i
46 done
47
48 if test -e ${D}${bindir} ; then
49 for i in ${D}${bindir}/* ; do \
50 if [ ${PN} != "${BPN}-native" ]; then
51 sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
52 fi
53 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
54 done
55 fi
56
57 if test -e ${D}${sbindir}; then
58 for i in ${D}${sbindir}/* ; do \
59 if [ ${PN} != "${BPN}-native" ]; then
60 sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
61 fi
62 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
63 done
64 fi
65
66 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
67
68 #
69 # FIXME: Bandaid against wrong datadir computation
70 #
71 if test -e ${D}${datadir}/share; then
72 mv -f ${D}${datadir}/share/* ${D}${datadir}/
73 rmdir ${D}${datadir}/share
74 fi
75}
76
77EXPORT_FUNCTIONS do_compile do_install
78
79export LDSHARED="${CCLD} -shared"
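For context, a minimal (hypothetical) recipe using this class only needs to inherit it and point SRC_URI at a setup.py-based source tree; the compile/stage/install functions above then drive setup.py for each step:

    # hypothetical recipe: pyfoo_1.0.bb
    DESCRIPTION = "Example Python module built with distutils"
    LICENSE = "MIT"
    # plus the usual LIC_FILES_CHKSUM and SRC_URI checksums
    SRC_URI = "http://example.org/pyfoo-${PV}.tar.gz"

    inherit distutils
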
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
new file mode 100644
index 0000000000..c759289701
--- /dev/null
+++ b/meta/classes/externalsrc.bbclass
@@ -0,0 +1,68 @@
1# Copyright (C) 2012 Linux Foundation
2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5# Released under the MIT license (see COPYING.MIT for the terms)
6#
7# externalsrc.bbclass enables use of an existing source tree, usually one external to
8# the build system, to build a piece of software rather than using the usual
9# fetch/unpack/patch process.
10#
11# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
12# directory containing the sources you want to use, e.g. from local.conf for a recipe
13# called "myrecipe" you would do:
14#
15# INHERIT += "externalsrc"
16# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
17#
18# In order to make this class work for both target and native versions (or with
19# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
20# directory under the work directory (split source and build directories). This is
21# the default, but the build directory can be set to the source directory if
22# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
23#
24# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
25#
26
27SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
28
29def remove_tasks(tasks, deltasks, d):
30 for task in tasks:
31 deps = d.getVarFlag(task, "deps")
32 for preptask in deltasks:
33 if preptask in deps:
34 deps.remove(preptask)
35 d.setVarFlag(task, "deps", deps)
36 # Poking around bitbake internal variables is evil but there appears to be no better way :(
37 tasklist = d.getVar('__BBTASKS') or []
38 for task in deltasks:
39 d.delVarFlag(task, "task")
40 if task in tasklist:
41 tasklist.remove(task)
42 d.setVar('__BBTASKS', tasklist)
43
44python () {
45 externalsrc = d.getVar('EXTERNALSRC', True)
46 if externalsrc:
47 d.setVar('S', externalsrc)
48 externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
49 if externalsrcbuild:
50 d.setVar('B', externalsrcbuild)
51 else:
52 d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
53 d.setVar('SRC_URI', '')
54
55 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
56 covered = d.getVar("SRCTREECOVEREDTASKS", True).split()
57
58 for task in tasks:
59 if task.endswith("_setscene"):
60 # sstate is never going to work for external source trees, disable it
61 covered.append(task)
62 else:
63            # Since configure will likely touch ${S}, take a lock so that only one task has access at a time
64 d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock")
65
66 remove_tasks(tasks, covered, d)
67}
68
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
new file mode 100644
index 0000000000..8670a2a85a
--- /dev/null
+++ b/meta/classes/extrausers.bbclass
@@ -0,0 +1,61 @@
1# This bbclass is mainly used for image level user/group configuration.
2# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
3
4# Below is an example showing how to use this functionality.
5# INHERIT += "extrausers"
6# EXTRA_USERS_PARAMS = "\
7# useradd -p '' tester; \
8# groupadd developers; \
9# userdel nobody; \
10#	groupdel video; \
11# groupmod -g 1020 developers; \
12# usermod -s /bin/sh tester; \
13# "
14
15
16inherit useradd_base
17
18IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}"
19
20# Image level user / group settings
21ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
22
23# Image level user / group settings
24set_user_group () {
25 user_group_settings="${EXTRA_USERS_PARAMS}"
26 export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo"
27 setting=`echo $user_group_settings | cut -d ';' -f1`
28 remaining=`echo $user_group_settings | cut -d ';' -f2-`
29 while test "x$setting" != "x"; do
30 cmd=`echo $setting | cut -d ' ' -f1`
31 opts=`echo $setting | cut -d ' ' -f2-`
32	# Unlike useradd.bbclass, there's no file locking issue here, as these
33	# settings are applied serially. So we only retry once.
34 case $cmd in
35 useradd)
36 perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
37 ;;
38 groupadd)
39 perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
40 ;;
41 userdel)
42 perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
43 ;;
44 groupdel)
45 perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
46 ;;
47 usermod)
48 perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
49 ;;
50 groupmod)
51 perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
52 ;;
53 *)
54 bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
55 ;;
56 esac
57 # iterate to the next setting
58 setting=`echo $remaining | cut -d ';' -f1`
59 remaining=`echo $remaining | cut -d ';' -f2-`
60 done
61}
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
new file mode 100644
index 0000000000..325bcae58f
--- /dev/null
+++ b/meta/classes/fontcache.bbclass
@@ -0,0 +1,36 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for font
3# packages.
4#
5
6DEPENDS += "qemu-native"
7inherit qemu
8
9FONT_PACKAGES ??= "${PN}"
10
11fontcache_common() {
12if [ "x$D" != "x" ] ; then
13 $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \
14 libdir=${libdir} base_libdir=${base_libdir}
15else
16 fc-cache
17fi
18}
19
20python populate_packages_append() {
21 font_pkgs = d.getVar('FONT_PACKAGES', True).split()
22
23 for pkg in font_pkgs:
24 bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
25 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
26 if not postinst:
27 postinst = '#!/bin/sh\n'
28 postinst += d.getVar('fontcache_common', True)
29 d.setVar('pkg_postinst_%s' % pkg, postinst)
30
31 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
32 if not postrm:
33 postrm = '#!/bin/sh\n'
34 postrm += d.getVar('fontcache_common', True)
35 d.setVar('pkg_postrm_%s' % pkg, postrm)
36}
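For usage context: a font recipe would typically just inherit this class, optionally narrowing FONT_PACKAGES if the fonts live in a sub-package (hypothetical snippet):

    inherit fontcache
    FONT_PACKAGES = "${PN}-fonts"
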
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
new file mode 100644
index 0000000000..e9076b2779
--- /dev/null
+++ b/meta/classes/gconf.bbclass
@@ -0,0 +1,70 @@
1DEPENDS += "gconf gconf-native"
2
3# These are for when gconftool is used natively and the prefix isn't necessarily
4# the sysroot. TODO: replicate the postinst logic for -native packages going
5# into sysroot as they won't be running their own install-time schema
6# registration (disabled below) nor the postinst script (as they don't happen).
7export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
8export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
9
10# Disable install-time schema registration as we're a packaging system so this
11# happens in the postinst script, not at install time. Set both the configure
12# script option and the traditional environment variable just to make sure.
13EXTRA_OECONF += "--disable-schemas-install"
14export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
15
16gconf_postinst() {
17if [ "x$D" != "x" ]; then
18 export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
19else
20 export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
21fi
22
23SCHEMA_LOCATION=$D/etc/gconf/schemas
24for SCHEMA in ${SCHEMA_FILES}; do
25 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
26 HOME=$D/root gconftool-2 \
27 --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
28 fi
29done
30}
31
32gconf_prerm() {
33SCHEMA_LOCATION=/etc/gconf/schemas
34for SCHEMA in ${SCHEMA_FILES}; do
35 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
36 HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
37 gconftool-2 \
38 --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
39 fi
40done
41}
42
43python populate_packages_append () {
44 import re
45 packages = d.getVar('PACKAGES', True).split()
46 pkgdest = d.getVar('PKGDEST', True)
47
48 for pkg in packages:
49 schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
50 schemas = []
51 schema_re = re.compile(".*\.schemas$")
52 if os.path.exists(schema_dir):
53 for f in os.listdir(schema_dir):
54 if schema_re.match(f):
55 schemas.append(f)
56 if schemas != []:
57 bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
58 d.setVar('SCHEMA_FILES', " ".join(schemas))
59 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
60 if not postinst:
61 postinst = '#!/bin/sh\n'
62 postinst += d.getVar('gconf_postinst', True)
63 d.setVar('pkg_postinst_%s' % pkg, postinst)
64 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
65 if not prerm:
66 prerm = '#!/bin/sh\n'
67 prerm += d.getVar('gconf_prerm', True)
68 d.setVar('pkg_prerm_%s' % pkg, prerm)
69 d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX') + 'gconf')
70}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
new file mode 100644
index 0000000000..03b89b2455
--- /dev/null
+++ b/meta/classes/gettext.bbclass
@@ -0,0 +1,19 @@
1def gettext_dependencies(d):
2 if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
3 return ""
4 if d.getVar('USE_NLS', True) == 'no':
5 return "gettext-minimal-native"
6 return d.getVar('DEPENDS_GETTEXT', False)
7
8def gettext_oeconf(d):
9 if d.getVar('USE_NLS', True) == 'no':
10 return '--disable-nls'
11 # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
12 if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
13 return '--disable-nls'
14 return "--enable-nls"
15
16DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
17
18BASEDEPENDS =+ "${@gettext_dependencies(d)}"
19EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
new file mode 100644
index 0000000000..a19dd1703a
--- /dev/null
+++ b/meta/classes/gnome.bbclass
@@ -0,0 +1,3 @@
1inherit gnomebase gtk-icon-cache gconf mime
2
3EXTRA_OECONF += "--enable-introspection=no"
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
new file mode 100644
index 0000000000..b29950006a
--- /dev/null
+++ b/meta/classes/gnomebase.bbclass
@@ -0,0 +1,30 @@
1def gnome_verdir(v):
2 return oe.utils.trim_version(v, 2)
3
4GNOME_COMPRESS_TYPE ?= "bz2"
5SECTION ?= "x11/gnome"
6GNOMEBN ?= "${BPN}"
7SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
8
9DEPENDS += "gnome-common-native"
10
11FILES_${PN} += "${datadir}/application-registry \
12 ${datadir}/mime-info \
13 ${datadir}/mime/packages \
14 ${datadir}/mime/application \
15 ${datadir}/gnome-2.0 \
16 ${datadir}/polkit* \
17 ${datadir}/GConf \
18 ${datadir}/glib-2.0/schemas \
19"
20
21FILES_${PN}-doc += "${datadir}/devhelp"
22
23inherit autotools pkgconfig
24
25do_install_append() {
26 rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
27 rm -rf ${D}${localstatedir}/scrollkeeper/*
28 rm -f ${D}${datadir}/applications/*.cache
29}
30
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
new file mode 100644
index 0000000000..96fb98b043
--- /dev/null
+++ b/meta/classes/grub-efi.bbclass
@@ -0,0 +1,140 @@
1# grub-efi.bbclass
2# Copyright (c) 2011, Intel Corporation.
3# All rights reserved.
4#
5# Released under the MIT license (see packages/COPYING)
6
7# Provide grub-efi specific functions for building bootable images.
8
9# External variables
10# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
11# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
12# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
13# ${LABELS} - a list of targets for the automatic config
14# ${APPEND} - an override list of append strings for each label
15# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
16# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
17
18do_bootimg[depends] += "grub-efi-${TRANSLATED_TARGET_ARCH}-native:do_deploy"
19
20GRUB_SERIAL ?= "console=ttyS0,115200"
21GRUBCFG = "${S}/grub.cfg"
22GRUB_TIMEOUT ?= "10"
23#FIXME: build this from the machine config
24GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
25
26EFIDIR = "/EFI/BOOT"
27
28grubefi_populate() {
29 # DEST must be the root of the image so that EFIDIR is not
30 # nested under a top level directory.
31 DEST=$1
32
33 install -d ${DEST}${EFIDIR}
34
35 GRUB_IMAGE="bootia32.efi"
36 if [ "${TARGET_ARCH}" = "x86_64" ]; then
37 GRUB_IMAGE="bootx64.efi"
38 fi
39 install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
40
41 install -m 0644 ${GRUBCFG} ${DEST}${EFIDIR}
42}
43
44grubefi_iso_populate() {
45 iso_dir=$1
46 grubefi_populate $iso_dir
47	# Build an EFI directory to create efi.img
48 mkdir -p ${EFIIMGDIR}/${EFIDIR}
49 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
50 cp $iso_dir/vmlinuz ${EFIIMGDIR}
51 echo "EFI\\BOOT\\${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
52 if [ -f "$iso_dir/initrd" ] ; then
53 cp $iso_dir/initrd ${EFIIMGDIR}
54 fi
55}
56
57grubefi_hddimg_populate() {
58 grubefi_populate $1
59}
60
61python build_grub_cfg() {
62 import sys
63
64 workdir = d.getVar('WORKDIR', True)
65 if not workdir:
66 bb.error("WORKDIR not defined, unable to package")
67 return
68
69 gfxserial = d.getVar('GRUB_GFXSERIAL', True) or ""
70
71 labels = d.getVar('LABELS', True)
72 if not labels:
73 bb.debug(1, "LABELS not defined, nothing to do")
74 return
75
76 if labels == []:
77 bb.debug(1, "No labels, nothing to do")
78 return
79
80 cfile = d.getVar('GRUBCFG', True)
81 if not cfile:
82 raise bb.build.FuncFailed('Unable to read GRUBCFG')
83
84 try:
85 cfgfile = file(cfile, 'w')
86    except (IOError, OSError):
87        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
88
89 cfgfile.write('# Automatically created by OE\n')
90
91 opts = d.getVar('GRUB_OPTS', True)
92 if opts:
93 for opt in opts.split(';'):
94 cfgfile.write('%s\n' % opt)
95
96 cfgfile.write('default=%s\n' % (labels.split()[0]))
97
98 timeout = d.getVar('GRUB_TIMEOUT', True)
99 if timeout:
100 cfgfile.write('timeout=%s\n' % timeout)
101 else:
102 cfgfile.write('timeout=50\n')
103
104 if gfxserial == "1":
105 btypes = [ [ " graphics console", "" ],
106 [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
107 else:
108 btypes = [ [ "", "" ] ]
109
110 for label in labels.split():
111 localdata = d.createCopy()
112
113 overrides = localdata.getVar('OVERRIDES', True)
114 if not overrides:
115 raise bb.build.FuncFailed('OVERRIDES not defined')
116
117 for btype in btypes:
118 localdata.setVar('OVERRIDES', label + ':' + overrides)
119 bb.data.update_data(localdata)
120
121 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
122 lb = label
123 if label == "install":
124 lb = "install-efi"
125 cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
126
127 append = localdata.getVar('APPEND', True)
128 initrd = localdata.getVar('INITRD', True)
129
130 if append:
131 cfgfile.write('%s' % (append))
132 cfgfile.write(' %s' % btype[1])
133 cfgfile.write('\n')
134
135 if initrd:
136 cfgfile.write('initrd /initrd')
137 cfgfile.write('\n}\n')
138
139 cfgfile.close()
140}
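For reference, with LABELS = "boot install", GRUB_GFXSERIAL unset and no INITRD, build_grub_cfg() above would emit a grub.cfg roughly like the following (the APPEND contents are illustrative):

    # Automatically created by OE
    serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
    default=boot
    timeout=10

    menuentry 'boot'{
    linux /vmlinuz LABEL=boot console=ttyS0,115200 
    }

    menuentry 'install'{
    linux /vmlinuz LABEL=install-efi console=ttyS0,115200 
    }
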
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
new file mode 100644
index 0000000000..dec5abc026
--- /dev/null
+++ b/meta/classes/gsettings.bbclass
@@ -0,0 +1,37 @@
1# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
2# form on package install and removal.
3#
4# The compiled schemas are platform-agnostic, so we can depend on
5# glib-2.0-native for the native tool and run the postinst script when the
6# rootfs builds to save a little time on first boot.
7
8# TODO use a trigger so that this runs once per package operation run
9
10DEPENDS += "glib-2.0-native"
11
12RDEPENDS_${PN} += "glib-2.0-utils"
13
14FILES_${PN} += "${datadir}/glib-2.0/schemas"
15
16gsettings_postinstrm () {
17 glib-compile-schemas $D${datadir}/glib-2.0/schemas
18}
19
20python populate_packages_append () {
21 pkg = d.getVar('PN', True)
22 bb.note("adding gsettings postinst scripts to %s" % pkg)
23
24 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
25 if not postinst:
26 postinst = '#!/bin/sh\n'
27 postinst += d.getVar('gsettings_postinstrm', True)
28 d.setVar('pkg_postinst_%s' % pkg, postinst)
29
30 bb.note("adding gsettings postrm scripts to %s" % pkg)
31
32 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
33 if not postrm:
34 postrm = '#!/bin/sh\n'
35 postrm += d.getVar('gsettings_postinstrm', True)
36 d.setVar('pkg_postrm_%s' % pkg, postrm)
37}
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
new file mode 100644
index 0000000000..fb7863e99b
--- /dev/null
+++ b/meta/classes/gtk-doc.bbclass
@@ -0,0 +1,23 @@
1# Helper class to pull in the right gtk-doc dependencies and disable
2# gtk-doc.
3#
4# Long-term it would be great if this class could be toggled between
5# gtk-doc-stub-native and the real gtk-doc-native, which would enable
6# re-generation of documentation. For now, we'll make do with this which
7# packages up any existing documentation (so from tarball builds).
8
9# The documentation directory, where the infrastructure will be copied.
10# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
11GTKDOC_DOCDIR ?= "${S}"
12
13DEPENDS_append = " gtk-doc-stub-native"
14
15EXTRA_OECONF_append = "\
16 --disable-gtk-doc \
17 --disable-gtk-doc-html \
18 --disable-gtk-doc-pdf \
19"
20
21do_configure_prepend () {
22 ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} )
23}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
new file mode 100644
index 0000000000..789fa38a16
--- /dev/null
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -0,0 +1,62 @@
1FILES_${PN} += "${datadir}/icons/hicolor"
2
3DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-update-icon-cache-native"
4
5gtk_icon_cache_postinst() {
6if [ "x$D" != "x" ]; then
7 $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
8 base_libdir=${base_libdir}
9else
10
11 # Update the pixbuf loaders in case they haven't been registered yet
12 GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
13
14 for icondir in /usr/share/icons/* ; do
15 if [ -d $icondir ] ; then
16 gtk-update-icon-cache -fqt $icondir
17 fi
18 done
19fi
20}
21
22gtk_icon_cache_postrm() {
23if [ "x$D" != "x" ]; then
24 $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
25 base_libdir=${base_libdir}
26else
27 for icondir in /usr/share/icons/* ; do
28 if [ -d $icondir ] ; then
29 gtk-update-icon-cache -qt $icondir
30 fi
31 done
32fi
33}
34
35python populate_packages_append () {
36 packages = d.getVar('PACKAGES', True).split()
37 pkgdest = d.getVar('PKGDEST', True)
38
39 for pkg in packages:
40 icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
41 if not os.path.exists(icon_dir):
42 continue
43
44 bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
45 rdepends = ' ' + d.getVar('MLPREFIX') + "hicolor-icon-theme"
46 d.appendVar('RDEPENDS_%s' % pkg, rdepends)
47
48 bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
49
50 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
51 if not postinst:
52 postinst = '#!/bin/sh\n'
53 postinst += d.getVar('gtk_icon_cache_postinst', True)
54 d.setVar('pkg_postinst_%s' % pkg, postinst)
55
56 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
57 if not postrm:
58 postrm = '#!/bin/sh\n'
59 postrm += d.getVar('gtk_icon_cache_postrm', True)
60 d.setVar('pkg_postrm_%s' % pkg, postrm)
61}
62
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
new file mode 100644
index 0000000000..e11ed222d6
--- /dev/null
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -0,0 +1,83 @@
1# This class will update the inputmethod module cache for virtual keyboards
2#
3# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules
4
5DEPENDS =+ "qemu-native"
6
7inherit qemu
8
9GTKIMMODULES_PACKAGES ?= "${PN}"
10
11gtk_immodule_cache_postinst() {
12if [ "x$D" != "x" ]; then
13 for maj_ver in 2 3; do
14 if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
15 IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
16 ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
17 $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
18 sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
19
20 [ $? -ne 0 ] && exit 1
21 fi
22 done
23
24 exit 0
25fi
26if [ ! -z `which gtk-query-immodules-2.0` ]; then
27 gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
28fi
29if [ ! -z `which gtk-query-immodules-3.0` ]; then
30 gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
31fi
32}
33
34gtk_immodule_cache_postrm() {
35if [ "x$D" != "x" ]; then
36 for maj_ver in 2 3; do
37 if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
38 IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
39 ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
40 $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
41 sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
42
43 [ $? -ne 0 ] && exit 1
44 fi
45 done
46
47 exit 0
48fi
49if [ ! -z `which gtk-query-immodules-2.0` ]; then
50 gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
51fi
52if [ ! -z `which gtk-query-immodules-3.0` ]; then
53 gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
54fi
55}
56
57python populate_packages_append () {
58 gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split()
59
60 for pkg in gtkimmodules_pkgs:
61 bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
62
63 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
64 if not postinst:
65 postinst = '#!/bin/sh\n'
66 postinst += d.getVar('gtk_immodule_cache_postinst', True)
67 d.setVar('pkg_postinst_%s' % pkg, postinst)
68
69 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
70 if not postrm:
71 postrm = '#!/bin/sh\n'
72 postrm += d.getVar('gtk_immodule_cache_postrm', True)
73 d.setVar('pkg_postrm_%s' % pkg, postrm)
74}
75
76python __anonymous() {
77 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
78 gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES')
79 if not gtkimmodules_check:
80 bb_filename = d.getVar('FILE')
81            raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodule-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
82}
83
diff --git a/meta/classes/gzipnative.bbclass b/meta/classes/gzipnative.bbclass
new file mode 100644
index 0000000000..326cbbb6f6
--- /dev/null
+++ b/meta/classes/gzipnative.bbclass
@@ -0,0 +1,5 @@
1EXTRANATIVEPATH += "pigz-native gzip-native"
2DEPENDS += "gzip-native"
3
4# tar may get run by do_unpack or do_populate_lic which could call gzip
5do_unpack[depends] += "gzip-native:do_populate_sysroot"
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
new file mode 100644
index 0000000000..cf3f23d93a
--- /dev/null
+++ b/meta/classes/icecc.bbclass
@@ -0,0 +1,255 @@
1# IceCream distributed compiling support
2#
3# Stages directories with symlinks from gcc/g++ to icecc, for both
4# native and cross compilers. Depending on each configure or compile,
5# the directories are added at the head of the PATH list and ICECC_CXX
6# and ICECC_CC are set.
7#
8# For the cross compiler, creates a tar.gz of our toolchain and sets
9# ICECC_VERSION accordingly.
10#
11#The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
12#necessary environment tar.gz file to be used by the remote machines.
13#It also supports meta-toolchain generation.
14#
15#If ICECC_PATH is not set in local.conf then the class will try to locate it using 'which',
16#but nothing is guaranteed ;)
17#
18#If ICECC_ENV_EXEC is set in local.conf, it should point to the icecc-create-env script provided by the user,
19#or the default one provided by icecc-create-env.bb will be used
20#(NOTE that this is a modified version of the script, *not* the one that comes with icecc).
21#
22#Users can specify that specific packages, or packages belonging to a class, should not use icecc to
23#distribute compile jobs to remote machines but should be handled locally, by defining
24#ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL with the appropriate values in local.conf.
25#########################################################################################
26#Error checking is kept to a minimum, so double check any parameters you pass to the class
27###########################################################################################
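A typical local.conf fragment enabling the class might look like this (the path, job count and blacklist entries are illustrative):

    INHERIT += "icecc"
    ICECC_PATH = "/usr/bin/icecc"
    ICECC_PARALLEL_MAKE = "-j 16"
    ICECC_USER_CLASS_BL = "image"
    ICECC_USER_PACKAGE_BL = "qemu-native"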
28
29BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED"
30
31ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
32
33def icecc_dep_prepend(d):
34 # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
35 # we need that built is the responsibility of the patch function / class, not
36 # the application.
37 if not d.getVar('INHIBIT_DEFAULT_DEPS'):
38 return "icecc-create-env-native"
39 return ""
40
41DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
42
43def get_cross_kernel_cc(bb,d):
44 kernel_cc = d.expand('${KERNEL_CC}')
45 kernel_cc = kernel_cc.replace('ccache', '').strip()
46 kernel_cc = kernel_cc.split(' ')[0]
47 kernel_cc = kernel_cc.strip()
48 return kernel_cc
49
50def get_icecc(d):
51 return d.getVar('ICECC_PATH') or os.popen("which icecc").read()[:-1]
52
53def create_path(compilers, bb, d):
54 """
55 Create Symlinks for the icecc in the staging directory
56 """
57 staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
58 if icc_is_kernel(bb, d):
59 staging += "-kernel"
60
61 #check if the icecc path is set by the user
62 icecc = get_icecc(d)
63
64 # Create the dir if necessary
65 try:
66 os.stat(staging)
67 except:
68 try:
69 os.makedirs(staging)
70 except:
71 pass
72
73 for compiler in compilers:
74 gcc_path = os.path.join(staging, compiler)
75 try:
76 os.stat(gcc_path)
77 except:
78 try:
79 os.symlink(icecc, gcc_path)
80 except:
81 pass
82
83 return staging
84
85def use_icc(bb,d):
86 package_tmp = d.expand('${PN}')
87
88 system_class_blacklist = [ "none" ]
89 user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
90 package_class_blacklist = system_class_blacklist + user_class_blacklist
91
92 for black in package_class_blacklist:
93 if bb.data.inherits_class(black, d):
94 #bb.note(package_tmp, ' class ', black, ' found in blacklist, disable icecc')
95 return "no"
96
97    # The "system" package blacklist contains packages that cannot distribute
98    # compile tasks, for one reason or another
99 system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
100 user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
101 package_blacklist = system_package_blacklist + user_package_blacklist
102
103 for black in package_blacklist:
104 if black in package_tmp:
105 #bb.note(package_tmp, ' found in blacklist, disable icecc')
106 return "no"
107
108 if d.getVar('PARALLEL_MAKE') == "":
109 bb.note(package_tmp, " ", d.expand('${PV}'), " has empty PARALLEL_MAKE, disable icecc")
110 return "no"
111
112 return "yes"
113
114def icc_is_kernel(bb, d):
115    # kernel recipes use the separate cross-kernel environment
116    return bb.data.inherits_class("kernel", d)
117
118def icc_is_native(bb, d):
119    # cross and native recipes both run their compiler on the build host
120    return bb.data.inherits_class("cross", d) or \
121        bb.data.inherits_class("native", d)
122
123def icc_version(bb, d):
124 if use_icc(bb, d) == "no":
125 return ""
126
127 parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
128 d.setVar("PARALLEL_MAKE", parallel)
129
130 if icc_is_native(bb, d):
131 archive_name = "local-host-env"
132 elif d.expand('${HOST_PREFIX}') == "":
133 bb.fatal(d.expand("${PN}"), " NULL prefix")
134 else:
135 prefix = d.expand('${HOST_PREFIX}' )
136 distro = d.expand('${DISTRO}')
137 target_sys = d.expand('${TARGET_SYS}')
138 float = d.getVar('TARGET_FPU') or "hard"
139 archive_name = prefix + distro + "-" + target_sys + "-" + float
140 if icc_is_kernel(bb, d):
141 archive_name += "-kernel"
142
143 import socket
144 ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
145 tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
146
147 return tar_file
148
149def icc_path(bb,d):
150 if icc_is_kernel(bb, d):
151 return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
152
153 else:
154 prefix = d.expand('${HOST_PREFIX}')
155 return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
156
157def icc_get_external_tool(bb, d, tool):
158 external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
159 target_prefix = d.expand('${TARGET_PREFIX}')
160 return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
161
162def icc_get_tool(bb, d, tool):
163 if icc_is_native(bb, d):
164 return os.popen("which %s" % tool).read()[:-1]
165 elif icc_is_kernel(bb, d):
166 return os.popen("which %s" % get_cross_kernel_cc(bb, d)).read()[:-1]
167 else:
168 ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
169 target_sys = d.expand('${TARGET_SYS}')
170 tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
171 if os.path.isfile(tool_bin):
172 return tool_bin
173 else:
174 external_tool_bin = icc_get_external_tool(bb, d, tool)
175 if os.path.isfile(external_tool_bin):
176 return external_tool_bin
177 else:
178 return ""
179
180def icc_get_and_check_tool(bb, d, tool):
181 # Check that g++ or gcc is not a symbolic link to icecc binary in
182 # PATH or icecc-create-env script will silently create an invalid
183 # compiler environment package.
184 t = icc_get_tool(bb, d, tool)
185 if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
186 bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
187 return ""
188 else:
189 return t
190
191set_icecc_env() {
192 if [ "x${ICECC_DISABLED}" != "x" ]
193 then
194 return
195 fi
196 ICECC_VERSION="${@icc_version(bb, d)}"
197 if [ "x${ICECC_VERSION}" = "x" ]
198 then
199 return
200 fi
201
202 ICE_PATH="${@icc_path(bb, d)}"
203 if [ "x${ICE_PATH}" = "x" ]
204 then
205 return
206 fi
207
208 ICECC_CC="${@icc_get_and_check_tool(bb, d, "gcc")}"
209 ICECC_CXX="${@icc_get_and_check_tool(bb, d, "g++")}"
210 if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
211 then
212 return
213 fi
214
215 ICE_VERSION=`$ICECC_CC -dumpversion`
216 ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
217 if [ ! -x "${ICECC_ENV_EXEC}" ]
218 then
219 return
220 fi
221
222 ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
223 if [ "`dirname "${ICECC_AS}"`" = "." ]
224 then
225 ICECC_AS="`which as`"
226 fi
227
228 if [ ! -r "${ICECC_VERSION}" ]
229 then
230 mkdir -p "`dirname "${ICECC_VERSION}"`"
231 ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
232 fi
233
234 export ICECC_VERSION ICECC_CC ICECC_CXX
235 export PATH="$ICE_PATH:$PATH"
236 export CCACHE_PATH="$PATH"
237
238 bbnote "Using icecc"
239}
240
241do_configure_prepend() {
242 set_icecc_env
243}
244
245do_compile_prepend() {
246 set_icecc_env
247}
248
249do_compile_kernelmodules_prepend() {
250 set_icecc_env
251}
252
253#do_install_prepend() {
254# set_icecc_env
255#}
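As a usage sketch, enabling the class typically happens globally from local.conf; the values below are illustrative assumptions, not defaults shipped with the class:

    INHERIT += "icecc"
    # Optional: explicit path to the icecc binary ('which icecc' is the fallback)
    ICECC_PATH = "/usr/bin/icecc"
    # PARALLEL_MAKE to use while icecc is active (see icc_version above)
    ICECC_PARALLEL_MAKE = "-j 16"
    # Extra packages/classes to keep local, beyond the built-in blacklists
    # (the package and class names here are examples)
    ICECC_USER_PACKAGE_BL = "linux-yocto"
    #ICECC_USER_CLASS_BL = "cmake"
    # Set to any non-empty value to turn icecc off without removing the INHERIT
    #ICECC_DISABLED = "1"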
diff --git a/meta/classes/image-empty.bbclass b/meta/classes/image-empty.bbclass
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/classes/image-empty.bbclass
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
new file mode 100644
index 0000000000..bfb59f808b
--- /dev/null
+++ b/meta/classes/image-live.bbclass
@@ -0,0 +1,15 @@
1
2AUTO_SYSLINUXCFG = "1"
3INITRD_IMAGE ?= "core-image-minimal-initramfs"
4INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
5SYSLINUX_ROOT = "root=/dev/ram0 "
6SYSLINUX_TIMEOUT ?= "10"
7SYSLINUX_LABELS ?= "boot install"
8LABELS_append = " ${SYSLINUX_LABELS} "
9
10ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
11
12do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs"
13do_bootimg[depends] += "${PN}:do_rootfs"
14
15inherit bootimg
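Usage note: this class is not normally inherited directly; adding "live" to IMAGE_FSTYPES makes image.bbclass (further down) pull it in via its IMAGE_TYPE_live indirection. A local.conf sketch, with the optional overrides shown as assumptions:

    IMAGE_FSTYPES += "live"
    # Optional overrides of the defaults set above
    #INITRD_IMAGE = "core-image-minimal-initramfs"
    #SYSLINUX_TIMEOUT = "30"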
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
new file mode 100644
index 0000000000..66b0f5251e
--- /dev/null
+++ b/meta/classes/image-mklibs.bbclass
@@ -0,0 +1,73 @@
1do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
4
5mklibs_optimize_image_doit() {
6 rm -rf ${WORKDIR}/mklibs
7 mkdir -p ${WORKDIR}/mklibs/dest
8 cd ${IMAGE_ROOTFS}
9 du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
10 for i in `find .`; do file $i; done \
11 | grep ELF \
12 | grep "LSB executable" \
13 | grep "dynamically linked" \
14 | sed "s/:.*//" \
15 | sed "s+^\./++" \
16 > ${WORKDIR}/mklibs/executables.list
17
18 case ${TARGET_ARCH} in
19 powerpc | mips | microblaze )
20 dynamic_loader="${base_libdir}/ld.so.1"
21 ;;
22 powerpc64)
23 dynamic_loader="${base_libdir}/ld64.so.1"
24 ;;
25 x86_64)
26 dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
27 ;;
28 i586 )
29 dynamic_loader="${base_libdir}/ld-linux.so.2"
30 ;;
31 arm )
32 dynamic_loader="${base_libdir}/ld-linux.so.3"
33 ;;
34 * )
35 dynamic_loader="/unknown_dynamic_linker"
36 ;;
37 esac
38
39 mklibs -v \
40 --ldlib ${dynamic_loader} \
41 --libdir ${baselib} \
42 --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
43 --root ${IMAGE_ROOTFS} \
44 --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
45 -d ${WORKDIR}/mklibs/dest \
46 `cat ${WORKDIR}/mklibs/executables.list`
47
48 cd ${WORKDIR}/mklibs/dest
49 for i in *
50 do
51 cp $i `find ${IMAGE_ROOTFS} -name $i`
52 done
53
54 cd ${IMAGE_ROOTFS}
55 du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
56
57 echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
58 echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
59}
60
61mklibs_optimize_image() {
62 for img in ${MKLIBS_OPTIMIZED_IMAGES}
63 do
64 if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
65 then
66 mklibs_optimize_image_doit
67 break
68 fi
69 done
70}
71
72
73EXPORT_FUNCTIONS mklibs_optimize_image
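Putting the class to use requires both inheriting it and opting images in through MKLIBS_OPTIMIZED_IMAGES, which mklibs_optimize_image() checks above. A local.conf sketch (the image name is an example):

    INHERIT += "image-mklibs"
    # "all" optimizes every image; a specific name restricts the optimization
    MKLIBS_OPTIMIZED_IMAGES = "core-image-minimal"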
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
new file mode 100644
index 0000000000..53ef47e4d4
--- /dev/null
+++ b/meta/classes/image-prelink.bbclass
@@ -0,0 +1,35 @@
1do_rootfs[depends] += "prelink-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "prelink_image; "
4
5prelink_image () {
6# export PSEUDO_DEBUG=4
7# /bin/env | /bin/grep PSEUDO
8# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
9# echo "LD_PRELOAD=$LD_PRELOAD"
10
11 pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
12 echo "Size before prelinking $pre_prelink_size."
13
14 # We need a prelink conf on the filesystem, add one if it's missing
15 if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
16 cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \
17 ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
18 dummy_prelink_conf=true;
19 else
20 dummy_prelink_conf=false;
21 fi
22
23 # prelink!
24 ${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf
25
26 # Remove the prelink.conf if we had to add it.
27 if [ "$dummy_prelink_conf" = "true" ]; then
28 rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
29 fi
30
31 pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
32 echo "Size after prelinking $pre_prelink_size."
33}
34
35EXPORT_FUNCTIONS prelink_image
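A sketch of enabling this from local.conf; either form should work, since the class only hooks IMAGE_PREPROCESS_COMMAND:

    INHERIT += "image-prelink"
    # or, following the local.conf.sample convention:
    #USER_CLASSES += "image-prelink"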
diff --git a/meta/classes/image-swab.bbclass b/meta/classes/image-swab.bbclass
new file mode 100644
index 0000000000..124a090605
--- /dev/null
+++ b/meta/classes/image-swab.bbclass
@@ -0,0 +1,94 @@
1HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
2SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
3SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
4TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
5TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
6
7SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
8BB_DEFAULT_TASK = "generate_swabber_report"
9
10# Several recipes don't build with parallel make when run under strace
11# Ideally these should be fixed but as a temporary measure disable parallel
12# builds for troublesome recipes
13PARALLEL_MAKE_pn-openssl = ""
14PARALLEL_MAKE_pn-eglibc = ""
15PARALLEL_MAKE_pn-glib-2.0 = ""
16PARALLEL_MAKE_pn-libxml2 = ""
17PARALLEL_MAKE_pn-readline = ""
18PARALLEL_MAKE_pn-util-linux = ""
19PARALLEL_MAKE_pn-binutils = ""
20PARALLEL_MAKE_pn-bison = ""
21PARALLEL_MAKE_pn-cmake = ""
22PARALLEL_MAKE_pn-elfutils = ""
23PARALLEL_MAKE_pn-gcc = ""
24PARALLEL_MAKE_pn-gcc-runtime = ""
25PARALLEL_MAKE_pn-m4 = ""
26PARALLEL_MAKE_pn-opkg = ""
27PARALLEL_MAKE_pn-pkgconfig = ""
28PARALLEL_MAKE_pn-prelink = ""
29PARALLEL_MAKE_pn-rpm = ""
30PARALLEL_MAKE_pn-tcl = ""
31PARALLEL_MAKE_pn-beecrypt = ""
32PARALLEL_MAKE_pn-curl = ""
33PARALLEL_MAKE_pn-gmp = ""
34PARALLEL_MAKE_pn-libmpc = ""
35PARALLEL_MAKE_pn-libxslt = ""
36PARALLEL_MAKE_pn-lzo = ""
37PARALLEL_MAKE_pn-popt = ""
38PARALLEL_MAKE_pn-linux-wrs = ""
39PARALLEL_MAKE_pn-libgcrypt = ""
40PARALLEL_MAKE_pn-gpgme = ""
41PARALLEL_MAKE_pn-udev = ""
42PARALLEL_MAKE_pn-gnutls = ""
43
44python() {
45    # NOTE: It might be useful to detect host infection on native and cross
46    # packages, but as it turns out to be pretty hard to do this for all native
47    # and cross packages which aren't swabber-native or one of its
48    # dependencies, they have been ignored for now...
49 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
50 deps = (d.getVarFlag('do_setscene', 'depends') or "").split()
51 deps.append('strace-native:do_populate_sysroot')
52 d.setVarFlag('do_setscene', 'depends', " ".join(deps))
53 logdir = d.expand("${TRACE_LOGDIR}")
54 bb.utils.mkdirhier(logdir)
55 else:
56 d.setVar('STRACEFUNC', '')
57}
58
59STRACEPID = "${@os.getpid()}"
60STRACEFUNC = "imageswab_attachstrace"
61
62do_configure[prefuncs] += "${STRACEFUNC}"
63do_compile[prefuncs] += "${STRACEFUNC}"
64
65imageswab_attachstrace () {
66 STRACE=`which strace`
67
68 if [ -x "$STRACE" ]; then
69 swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
70 fi
71}
72
73do_generate_swabber_report () {
74
75 update_distro ${HOST_DATA}
76
77 # Swabber can't create the directory for us
78 mkdir -p ${SWABBER_REPORT}
79
80 REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
81
82 if [ `which ccache` ] ; then
83 CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
84 fi
85
86 if [ "$(ls -A ${HOST_DATA})" ]; then
87 echo "Generating swabber report"
88 swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
89 else
90 echo "No host data, cannot generate swabber report."
91 fi
92}
93addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
94do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"
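To use the class, inherit it globally and build an image as usual; the report then lands under SWABBER_REPORT. A local.conf sketch, with optional relocations shown as assumptions:

    INHERIT += "image-swab"
    # Optional: move the reports and logs (defaults are set above)
    #SWABBER_REPORT = "${LOG_DIR}/swabber/"
    #SWABBER_LOGS = "${LOG_DIR}/contamination-logs"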
diff --git a/meta/classes/image-vmdk.bbclass b/meta/classes/image-vmdk.bbclass
new file mode 100644
index 0000000000..6983e5c823
--- /dev/null
+++ b/meta/classes/image-vmdk.bbclass
@@ -0,0 +1,32 @@
1
2#NOISO = "1"
3
4SYSLINUX_ROOT = "root=/dev/hda2 "
5SYSLINUX_PROMPT = "0"
6SYSLINUX_TIMEOUT = "1"
7SYSLINUX_LABELS = "boot"
8LABELS_append = " ${SYSLINUX_LABELS} "
9
10# need to define the dependency and the ROOTFS for directdisk
11do_bootdirectdisk[depends] += "${PN}:do_rootfs"
12ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
13
14# creating VMDK relies on having a live hddimg so ensure we
15# inherit it here.
16#inherit image-live
17inherit boot-directdisk
18
19create_vmdk_image () {
20 qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk
21 ln -sf ${IMAGE_NAME}.vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vmdk
22}
23
24python do_vmdkimg() {
25 bb.build.exec_func('create_vmdk_image', d)
26}
27
28#addtask vmdkimg after do_bootimg before do_build
29addtask vmdkimg after do_bootdirectdisk before do_build
30
31do_vmdkimg[depends] += "qemu-native:do_populate_sysroot"
32
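As with live images, the class is pulled in indirectly: adding "vmdk" to IMAGE_FSTYPES causes image.bbclass (below) to inherit image-vmdk and schedule do_vmdkimg. For example, in local.conf:

    IMAGE_FSTYPES += "vmdk"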
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
new file mode 100644
index 0000000000..7650594f8c
--- /dev/null
+++ b/meta/classes/image.bbclass
@@ -0,0 +1,661 @@
1inherit rootfs_${IMAGE_PKGTYPE}
2
3inherit populate_sdk_base
4
5TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
6TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
7POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_install_complementary populate_sdk; rootfs_sysroot_relativelinks; "
8
9inherit gzipnative
10
11LICENSE = "MIT"
12PACKAGES = ""
13DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross"
14RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
15RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
16
17INHIBIT_DEFAULT_DEPS = "1"
18
19TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
20inherit ${TESTIMAGECLASS}
21
22# IMAGE_FEATURES may contain any available package group
23IMAGE_FEATURES ?= ""
24IMAGE_FEATURES[type] = "list"
25IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs package-management"
26
27# rootfs bootstrap install
28ROOTFS_BOOTSTRAP_INSTALL = "${@base_contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
29
30# packages to install from features
31FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
32FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
33
34# Define some very basic feature package groups
35SPLASH ?= "psplash"
36PACKAGE_GROUP_splash = "${SPLASH}"
37
38# Wildcards specifying complementary packages to install for every package that has been explicitly
39# installed into the rootfs
40COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
41COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
42COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
43COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
44COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
45
46def complementary_globs(featurevar, d):
47 all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
48 globs = []
49 features = set((d.getVar(featurevar, True) or '').split())
50 for name, glob in all_globs.items():
51 if name in features:
52 globs.append(glob)
53 return ' '.join(globs)
54
55IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
56SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
57SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
58
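A new complementary group can be registered the same way: define another COMPLEMENTARY_GLOB flag and name it in IMAGE_FEATURES; check_image_features() below accepts any defined flag as a valid feature. A sketch with an assumed '*-src' package glob (illustrative only):

    # Hypothetical group installing matching source packages
    COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
    IMAGE_FEATURES += "src-pkgs dev-pkgs"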
59def check_image_features(d):
60 valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split()
61 valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
62 for var in d:
63 if var.startswith("PACKAGE_GROUP_"):
64 valid_features.append(var[14:])
65 valid_features.sort()
66
67 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
68 for feature in features:
69 if feature not in valid_features:
70 bb.fatal("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
71
72IMAGE_INSTALL ?= ""
73IMAGE_INSTALL[type] = "list"
74export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
75PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
76
77# Images are generally built explicitly, do not need to be part of world.
78EXCLUDE_FROM_WORLD = "1"
79
80USE_DEVFS ?= "0"
81
82PID = "${@os.getpid()}"
83
84PACKAGE_ARCH = "${MACHINE_ARCH}"
85
86LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
87LDCONFIGDEPEND_libc-uclibc = ""
88
89do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
90do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
91do_rootfs[recrdeptask] += "do_packagedata"
92
93IMAGE_TYPE_live = '${@base_contains("IMAGE_FSTYPES", "live", "live", "empty", d)}'
94inherit image-${IMAGE_TYPE_live}
95IMAGE_TYPE_vmdk = '${@base_contains("IMAGE_FSTYPES", "vmdk", "vmdk", "empty", d)}'
96inherit image-${IMAGE_TYPE_vmdk}
97
98python () {
99 deps = " " + imagetypes_getdepends(d)
100 d.appendVarFlag('do_rootfs', 'depends', deps)
101
102 deps = ""
103 for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
104 deps += " %s:do_populate_sysroot" % dep
105 d.appendVarFlag('do_build', 'depends', deps)
106
107    # Process IMAGE_FEATURES; this must happen before runtime_mapping_rename.
108    # Check for replaced image features.
109 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
110 remain_features = features.copy()
111 for feature in features:
112 replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split())
113 remain_features -= replaces
114
115    # Check for conflicting image features.
116 for feature in remain_features:
117 conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split())
118 temp = conflicts & remain_features
119 if temp:
120 bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
121
122 d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features)))
123
124 # Ensure we have the vendor list for complementary package handling
125 ml_vendor_list = ""
126 multilibs = d.getVar('MULTILIBS', True) or ""
127 for ext in multilibs.split():
128 eext = ext.split(':')
129 if len(eext) > 1 and eext[0] == 'multilib':
130 localdata = bb.data.createCopy(d)
131 vendor = localdata.getVar("TARGET_VENDOR_virtclass-multilib-" + eext[1], False)
132 ml_vendor_list += " " + vendor
133 d.setVar('MULTILIB_VENDORS', ml_vendor_list)
134
135 check_image_features(d)
136 initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
137 if initramfs_image != "":
138 d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True))
139 d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_rootfs" % initramfs_image)
140}
141
142#
143# Get a list of files containing device tables to create.
144# * IMAGE_DEVICE_TABLE is the old name: an absolute path to a device table file
145# * IMAGE_DEVICE_TABLES is the new name: a file, or list of files, searched
146#   for in the BBPATH
147# If neither is specified then the default name of files/device_table-minimal.txt
148# is searched for in the BBPATH (same as the old version).
149#
150def get_devtable_list(d):
151 devtable = d.getVar('IMAGE_DEVICE_TABLE', True)
152 if devtable != None:
153 return devtable
154 str = ""
155 devtables = d.getVar('IMAGE_DEVICE_TABLES', True)
156 if devtables == None:
157 devtables = 'files/device_table-minimal.txt'
158 for devtable in devtables.split():
159 str += " %s" % bb.utils.which(d.getVar('BBPATH', True), devtable)
160 return str
161
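For example, an image needing extra device nodes could set the following (filenames are illustrative; both files are looked up in BBPATH by the function above):

    IMAGE_DEVICE_TABLES = "files/device_table-minimal.txt files/device_table-extra.txt"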
162IMAGE_CLASSES ?= "image_types"
163inherit ${IMAGE_CLASSES}
164
165IMAGE_POSTPROCESS_COMMAND ?= ""
166MACHINE_POSTPROCESS_COMMAND ?= ""
167ROOTFS_POSTPROCESS_COMMAND_prepend = "run_intercept_scriptlets; "
168# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
169ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "ssh_allow_empty_password; ", "",d)}'
170# Enable postinst logging if debug-tweaks is enabled
171ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "postinst_enable_logging; ", "",d)}'
172# Set default postinst log file
173POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
174
175# some default locales
176IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
177
178LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
179
180PSEUDO_PASSWD = "${IMAGE_ROOTFS}"
181
182do_rootfs[dirs] = "${TOPDIR} ${WORKDIR}/intercept_scripts"
183do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
184do_rootfs[cleandirs] += "${S} ${WORKDIR}/intercept_scripts"
185
186# Must call real_do_rootfs() from inside here, rather than as a separate
187# task, so that we have a single fakeroot context for the whole process.
188do_rootfs[umask] = "022"
189
190
191run_intercept_scriptlets () {
192 if [ -d ${WORKDIR}/intercept_scripts ]; then
193 cd ${WORKDIR}/intercept_scripts
194 echo "Running intercept scripts:"
195 for script in *; do
196 [ "$script" = "*" ] && break
197 [ "$script" = "postinst_intercept" ] || [ ! -x "$script" ] && continue
198 echo "> Executing $script"
199 ./$script && continue
200 echo "WARNING: intercept script \"$script\" failed, falling back to running postinstalls at first boot"
201 #
202            # If we got here, then the intercept has failed. Next, we must
203            # mark the postinstalls as "unpacked". For rpm it is a little bit
204            # different: we just have to save the package postinstalls in
205            # /etc/rpm-postinsts
206 #
207 pkgs="$(cat ./$script|grep "^##PKGS"|cut -d':' -f2)" || continue
208 case ${IMAGE_PKGTYPE} in
209 "rpm")
210 [ -d ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts/ ] || mkdir ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts/
211 v_expr=$(echo ${MULTILIB_GLOBAL_VARIANTS}|tr ' ' '|')
212 for p in $pkgs; do
213 # remove any multilib prefix from the package name (RPM
214 # does not use it like this)
215 new_p=$(echo $p | sed -r "s/^($v_expr)-//")
216
217 # extract the postinstall scriptlet from rpm package and
218 # save it in /etc/rpm-postinsts
219 echo " * postponing $new_p"
220 rpm -q --scripts --root=${IMAGE_ROOTFS} --dbpath=/var/lib/rpm $new_p |\
221 sed -n -e '/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}' |\
222 sed -e 's/postinstall scriptlet (using \(.*\)):$/#!\1/' -e '/^.* scriptlet (using .*):$/d'\
223 > ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts/$new_p
224 chmod +x ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts/$new_p
225 done
226 # move to the next intercept script
227 continue
228 ;;
229 "ipk")
230 status_file="${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/status"
231 ;;
232 "deb")
233 status_file="${IMAGE_ROOTFS}/var/lib/dpkg/status"
234 ;;
235 esac
236 # the next piece of code is run only for ipk/dpkg
237 sed_expr=""
238 for p in $pkgs; do
239 echo " * postponing $p"
240 sed_expr="$sed_expr -e \"/^Package: ${p}$/,/^Status: install.* installed$/ {s/installed/unpacked/}\""
241 done
242 eval sed -i $sed_expr $status_file
243 done
244 fi
245}
246
247# A hook function to support read-only-rootfs IMAGE_FEATURES
248# Currently, it only supports sysvinit systems.
249read_only_rootfs_hook () {
250 if ${@base_contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
251 # Tweak the mount option and fs_passno for rootfs in fstab
252 sed -i -e '/^[#[:space:]]*rootfs/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
253 # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
254 if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
255 sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
256 fi
257 # Run populate-volatile.sh at rootfs time to set up basic files
258 # and directories to support read-only rootfs.
259 if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
260 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
261 fi
262 # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
263 # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
264 # and the keys under /var/run/ssh.
265 if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
266 if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
267 echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
268 echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
269 else
270 echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
271 echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
272 fi
273 fi
274 fi
275}
276
277PACKAGE_EXCLUDE ??= ""
278PACKAGE_EXCLUDE[type] = "list"
279
280python rootfs_process_ignore() {
281 excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
282 inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
283 inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
284
285 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
286 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
287
288 for pkg in excl_pkgs:
289 if pkg in inst_pkgs:
290 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
291 inst_pkgs.remove(pkg)
292
293 if pkg in inst_attempt_pkgs:
294 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
295 inst_attempt_pkgs.remove(pkg)
296
297 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
298 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
299}
300do_rootfs[prefuncs] += "rootfs_process_ignore"
301
302# We have to delay the runtime_mapping_rename until just before rootfs runs
303# otherwise, the multilib renaming could step in and squash any fixups that
304# may have occurred.
305python rootfs_runtime_mapping() {
306 pn = d.getVar('PN', True)
307 runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
308 runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
309 runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
310}
311do_rootfs[prefuncs] += "rootfs_runtime_mapping"
312
313fakeroot do_rootfs () {
314 #set -x
315    # When using rpm incremental image generation, don't remove the rootfs
316 if [ "${INC_RPM_IMAGE_GEN}" != "1" -o "${IMAGE_PKGTYPE}" != "rpm" ]; then
317 rm -rf ${IMAGE_ROOTFS}
318 elif [ -d ${T}/saved_rpmlib/var/lib/rpm ]; then
319 # Move the rpmlib back
320 if [ ! -d ${IMAGE_ROOTFS}/var/lib/rpm ]; then
321 mkdir -p ${IMAGE_ROOTFS}/var/lib/
322 mv ${T}/saved_rpmlib/var/lib/rpm ${IMAGE_ROOTFS}/var/lib/
323 fi
324 fi
325 rm -rf ${MULTILIB_TEMP_ROOTFS}
326 mkdir -p ${IMAGE_ROOTFS}
327 mkdir -p ${DEPLOY_DIR_IMAGE}
328
329 cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOY_DIR_IMAGE}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt || true
330
331 # copy the intercept scripts
332 cp ${COREBASE}/scripts/postinst-intercepts/* ${WORKDIR}/intercept_scripts/
333
334 rootfs_${IMAGE_PKGTYPE}_do_rootfs
335
336 if [ "${USE_DEVFS}" != "1" ]; then
337 for devtable in ${@get_devtable_list(d)}; do
338            # Always return true since the node may already exist when using
339            # incremental image generation
340 makedevs -r ${IMAGE_ROOTFS} -D $devtable
341 done
342 fi
343
344 # remove unneeded packages/files from the final image
345 rootfs_uninstall_unneeded
346
347 insert_feed_uris
348
349 if [ "x${LDCONFIGDEPEND}" != "x" ]; then
350 # Run ldconfig on the image to create a valid cache
351 # (new format for cross arch compatibility)
352 echo executing: ldconfig -r ${IMAGE_ROOTFS} -c new -v
353 ldconfig -r ${IMAGE_ROOTFS} -c new -v
354 fi
355
356 # (re)create kernel modules dependencies
357    # This part is done by kernel-module-* postinstall scripts, but if the image
358    # does not contain any modules there are a few moments in the boot sequence
359    # with an "unable to open modules.dep" message.
360 if [ -e ${STAGING_KERNEL_DIR}/kernel-abiversion ]; then
361 KERNEL_VERSION=`cat ${STAGING_KERNEL_DIR}/kernel-abiversion`
362
363 mkdir -p ${IMAGE_ROOTFS}/lib/modules/$KERNEL_VERSION
364 depmodwrapper -a -b ${IMAGE_ROOTFS} $KERNEL_VERSION
365 fi
366
367 ${IMAGE_PREPROCESS_COMMAND}
368
369 ${@get_imagecmds(d)}
370
371 ${IMAGE_POSTPROCESS_COMMAND}
372
373 ${MACHINE_POSTPROCESS_COMMAND}
374}
375
376insert_feed_uris () {
377
378 echo "Building feeds for [${DISTRO}].."
379
380 for line in ${FEED_URIS}
381 do
382 # strip leading and trailing spaces/tabs, then split into name and uri
383 line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
384 feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
385 feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
386
387 echo "Added $feed_name feed with URL $feed_uri"
388
389 # insert new feed-sources
390 echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
391 done
392}
393
394log_check() {
395 for target in $*
396 do
397 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
398
399 echo "log_check: Using $lf_path as logfile"
400
401 if test -e "$lf_path"
402 then
403 ${IMAGE_PKGTYPE}_log_check $target $lf_path
404 else
405 echo "Cannot find logfile [$lf_path]"
406 fi
407 echo "Logfile is clean"
408 done
409}
410
411MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|"
412MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
413MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
414
415multilib_generate_python_file() {
416 cat >${MULTILIB_CHECK_FILE} <<EOF
417import sys, os, os.path
418import re,filecmp
419
420allow_rep=re.compile(re.sub("\|$","","${MULTILIBRE_ALLOW_REP}"))
421error_prompt="Multilib check error:"
422
423files={}
424dirs=raw_input()
425for dir in dirs.split():
426 for root, subfolders, subfiles in os.walk(dir):
427 for file in subfiles:
428 item=os.path.join(root,file)
429 key=str(os.path.join("/",os.path.relpath(item,dir)))
430
431 valid=True;
432 if key in files:
433            #check whether the file is allowed to be replaced
434 if allow_rep.match(key):
435 valid=True
436 else:
437 if not filecmp.cmp(files[key],item):
438 valid=False
439 print("%s duplicate files %s %s is not the same\n" % (error_prompt, item, files[key]))
440 sys.exit(1)
441
442 #pass the check, add to list
443 if valid:
444 files[key]=item
445EOF
446}
447
448multilib_sanity_check() {
449 multilib_generate_python_file
450 echo $@ | python ${MULTILIB_CHECK_FILE}
451}
452
453get_split_linguas() {
454 for translation in ${IMAGE_LINGUAS}; do
455 translation_split=$(echo ${translation} | awk -F '-' '{print $1}')
456 echo ${translation}
457 echo ${translation_split}
458 done | sort | uniq
459}
460
461rootfs_install_complementary() {
462 # Install complementary packages based upon the list of currently installed packages
463    # e.g. locales, *-dev, *-dbg, etc. Installation is attempt-only: if a
464    # matching package doesn't exist, no error will occur.
465 # Note: every backend needs to call this function explicitly after the normal
466 # package installation
467
468 # Get list of installed packages
469 list_installed_packages arch > ${WORKDIR}/installed_pkgs.txt
470
471 # Apply the globs to all the packages currently installed
472 if [ -n "$1" -a "$1" = "populate_sdk" ] ; then
473 GLOBS="${SDKIMAGE_INSTALL_COMPLEMENTARY}"
474 elif [ -n "$1" ]; then
475 GLOBS="$@"
476 else
477 GLOBS="${IMAGE_INSTALL_COMPLEMENTARY}"
478 # Add locales
479 SPLIT_LINGUAS=`get_split_linguas`
480 PACKAGES_TO_INSTALL=""
481 for lang in $SPLIT_LINGUAS ; do
482 GLOBS="$GLOBS *-locale-$lang"
483 done
484 fi
485
486 if [ "$GLOBS" != "" ] ; then
487 # Use the magic script to do all the work for us :)
488 : > ${WORKDIR}/complementary_pkgs.txt
489 for vendor in '${TARGET_VENDOR}' ${MULTILIB_VENDORS} ; do
490 oe-pkgdata-util glob ${PKGDATA_DIR} ${WORKDIR}/installed_pkgs.txt "$GLOBS" >> ${WORKDIR}/complementary_pkgs.txt
491 done
492
493 # Install the packages, if any
494 sed -i '/^$/d' ${WORKDIR}/complementary_pkgs.txt
495 if [ -s ${WORKDIR}/complementary_pkgs.txt ]; then
496 echo "Installing complementary packages"
497 rootfs_install_packages ${WORKDIR}/complementary_pkgs.txt
498 fi
499 fi
500
501 # Workaround for broken shell function dependencies
502 if false ; then
503 get_split_linguas
504 fi
505}
506
507rootfs_uninstall_unneeded () {
508 if ${@base_contains("IMAGE_FEATURES", "package-management", "false", "true", d)}; then
509 if [ -z "$(delayed_postinsts)" ]; then
510 # All packages were successfully configured.
511            # update-rc.d, base-passwd and run-postinsts are of no further use, remove them now
512 remove_run_postinsts=false
513 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts ]; then
514 remove_run_postinsts=true
515 fi
516
517 # Remove package only if it's installed
518 pkgs_to_remove="update-rc.d base-passwd ${ROOTFS_BOOTSTRAP_INSTALL}"
519 for pkg in $pkgs_to_remove; do
520 # regexp for pkg, to be used in grep and sed
521 pkg_regexp="^`echo $pkg | sed 's/\./\\\./'` "
522 if grep -q "$pkg_regexp" ${WORKDIR}/installed_pkgs.txt; then
523 rootfs_uninstall_packages $pkg
524 sed -i "/$pkg_regexp/d" ${WORKDIR}/installed_pkgs.txt
525 fi
526 done
527
528 # Need to remove rc.d files for run-postinsts by hand since opkg won't
529 # call postrm scripts in offline root mode.
530 if $remove_run_postinsts; then
531 update-rc.d -f -r ${IMAGE_ROOTFS} run-postinsts remove
532 fi
533 else
534            # Some packages were not successfully configured; save their
535            # postinsts only if the run-postinsts script is present.
536            # Otherwise, they're useless
537 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts ]; then
538 save_postinsts
539 fi
540 fi
541
542 # Since no package manager is present in the image the metadata is not needed
543 remove_packaging_data_files
544 fi
545}
546
547# set '*' as the root password so the images
548# can decide if they want it or not
549zap_root_password () {
550 sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
551 mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
552}
553
554# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
555ssh_allow_empty_password () {
556 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
557 sed -i 's#.*PermitRootLogin.*#PermitRootLogin yes#' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
558 sed -i 's#.*PermitEmptyPasswords.*#PermitEmptyPasswords yes#' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
559 fi
560
561 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
562 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
563 if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
564 sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
565 fi
566 else
567 printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
568 fi
569 fi
570}
571
572# Enable postinst logging if debug-tweaks is enabled
573postinst_enable_logging () {
574 mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
575 echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
576 echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
577}
578
579# Turn any symbolic /sbin/init link into a file
580remove_init_link () {
581 if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
582 LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
583 rm ${IMAGE_ROOTFS}/sbin/init
584 cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
585 fi
586}
587
588make_zimage_symlink_relative () {
589 if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
590 (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
591 fi
592}
593
594write_image_manifest () {
595 rootfs_${IMAGE_PKGTYPE}_write_manifest
596
597 if [ -n "${IMAGE_LINK_NAME}" ]; then
598 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
599 ln -s ${IMAGE_NAME}.rootfs.manifest ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
600 fi
601}
602
603# Make login manager(s) enable automatic login.
604# Useful for devices where we do not want to log in at all (e.g. phones)
605set_image_autologin () {
606 sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin
607}
608
609# Can be used to create /etc/timestamp during image construction to give a
610# reasonably sane default time setting
611rootfs_update_timestamp () {
612 date -u +%4Y%2m%2d%2H%2M >${IMAGE_ROOTFS}/etc/timestamp
613}
614
615# Prevent X from being started
616rootfs_no_x_startup () {
617 if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
618 chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
619 fi
620}
621
622rootfs_trim_schemas () {
623 for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
624 do
625 # Need this in case no files exist
626 if [ -e $schema ]; then
627 oe-trim-schemas $schema > $schema.new
628 mv $schema.new $schema
629 fi
630 done
631}
632
633# Make any absolute links in a sysroot relative
634rootfs_sysroot_relativelinks () {
635 sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
636}
637
638EXPORT_FUNCTIONS zap_root_password remove_init_link do_rootfs make_zimage_symlink_relative set_image_autologin rootfs_update_timestamp rootfs_no_x_startup
639
640do_fetch[noexec] = "1"
641do_unpack[noexec] = "1"
642do_patch[noexec] = "1"
643do_configure[noexec] = "1"
644do_compile[noexec] = "1"
645do_install[noexec] = "1"
646do_populate_sysroot[noexec] = "1"
647do_package[noexec] = "1"
648do_packagedata[noexec] = "1"
649do_package_write_ipk[noexec] = "1"
650do_package_write_deb[noexec] = "1"
651do_package_write_rpm[noexec] = "1"
652
653addtask rootfs before do_build
654# Allow the kernel to be repacked with the initramfs and boot image file as a single file
655do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
656do_bundle_initramfs[nostamp] = "1"
657do_bundle_initramfs[noexec] = "1"
658do_bundle_initramfs () {
659 :
660}
661addtask bundle_initramfs after do_rootfs
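To round this off, a minimal image recipe built on this class could look like the sketch below (recipe name and contents are illustrative; packagegroup-core-boot is the usual bootable baseline in oe-core):

    # my-example-image.bb (hypothetical)
    SUMMARY = "A small example image"
    LICENSE = "MIT"
    IMAGE_INSTALL = "packagegroup-core-boot"
    # debug-tweaks is one of the validitems declared by image.bbclass
    IMAGE_FEATURES += "debug-tweaks"
    inherit image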
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
new file mode 100644
index 0000000000..8c49169533
--- /dev/null
+++ b/meta/classes/image_types.bbclass
@@ -0,0 +1,251 @@
1def get_imagecmds(d):
2 cmds = "\n"
3 old_overrides = d.getVar('OVERRIDES', 0)
4
5 alltypes = d.getVar('IMAGE_FSTYPES', True).split()
6 types = []
7 ctypes = d.getVar('COMPRESSIONTYPES', True).split()
8 cimages = {}
9
10 # The elf image depends on the cpio.gz image already having
11 # been created, so we add that explicit ordering here.
12
13 if "elf" in alltypes:
14 alltypes.remove("elf")
15 if "cpio.gz" not in alltypes:
16 alltypes.append("cpio.gz")
17 alltypes.append("elf")
18
19 # Filter out all the compressed images from alltypes
20 for type in alltypes:
21 basetype = None
22 for ctype in ctypes:
23 if type.endswith("." + ctype):
24 basetype = type[:-len("." + ctype)]
25 if basetype not in types:
26 types.append(basetype)
27 if basetype not in cimages:
28 cimages[basetype] = []
29 if ctype not in cimages[basetype]:
30 cimages[basetype].append(ctype)
31 break
32 if not basetype and type not in types:
33 types.append(type)
34
35    # Live and VMDK images are processed via an inherited
36    # bbclass and do not get processed here.
37    # vmdk and live images both depend on ext3, so ensure it's present.
38 # Note: we need to ensure ext3 is in alltypes, otherwise, subimages may
39 # not contain ext3 and the .rootfs.ext3 file won't be created.
40 if "vmdk" in types:
41 if "ext3" not in types:
42 types.append("ext3")
43 if "ext3" not in alltypes:
44 alltypes.append("ext3")
45 types.remove("vmdk")
46 if "live" in types:
47 if "ext3" not in types:
48 types.append("ext3")
49 if "ext3" not in alltypes:
50 alltypes.append("ext3")
51 types.remove("live")
52
53 if d.getVar('IMAGE_LINK_NAME', True):
54 if d.getVar('RM_OLD_IMAGE', True) == "1":
55 # Remove the old image
56 cmds += "\trm -f `find ${DEPLOY_DIR_IMAGE} -maxdepth 1 -type l -name ${IMAGE_LINK_NAME}'.*' -exec readlink -f {} \;`"
57 # Remove the symlink
58 cmds += "\n\trm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.*"
59
60 for type in types:
61 ccmd = []
62 subimages = []
63 localdata = bb.data.createCopy(d)
64 localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
65 bb.data.update_data(localdata)
66 localdata.setVar('type', type)
67 if type in cimages:
68 for ctype in cimages[type]:
69 ccmd.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
70 subimages.append(type + "." + ctype)
71 if type not in alltypes:
72 ccmd.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
73 else:
74 subimages.append(type)
75 localdata.setVar('ccmd', "\n".join(ccmd))
76 localdata.setVar('subimages', " ".join(subimages))
77 cmd = localdata.getVar("IMAGE_CMD", True)
78 localdata.setVar('cmd', cmd)
79 cmds += "\n" + localdata.getVar("runimagecmd", True)
80 return cmds
81
82# The default alignment of the rootfs size is set to 1KiB. In case
83# you're using the SD card emulation of a QEMU system simulator you may
84# set this value to 2048 (2MiB alignment).
85IMAGE_ROOTFS_ALIGNMENT ?= "1"
86
87runimagecmd () {
88 # Image generation code for image type ${type}
89 # The base_size gets calculated:
90 # - initial size determined by `du -ks` of the IMAGE_ROOTFS
91 # - then multiplied by the IMAGE_OVERHEAD_FACTOR
92 # - tested against IMAGE_ROOTFS_SIZE
93    # - ROOTFS_SIZE is then rounded up to IMAGE_ROOTFS_ALIGNMENT
94 ROOTFS_SIZE=`du -ks ${IMAGE_ROOTFS} | awk '{base_size = $1 * ${IMAGE_OVERHEAD_FACTOR}; base_size = ((base_size > ${IMAGE_ROOTFS_SIZE} ? base_size : ${IMAGE_ROOTFS_SIZE}) + ${IMAGE_ROOTFS_EXTRA_SPACE}); if (base_size != int(base_size)) base_size = int(base_size + 1); base_size = base_size + ${IMAGE_ROOTFS_ALIGNMENT} - 1; base_size -= base_size % ${IMAGE_ROOTFS_ALIGNMENT}; print base_size }'`
95 ${cmd}
96 # Now create the needed compressed versions
97 cd ${DEPLOY_DIR_IMAGE}/
98 ${ccmd}
99 # And create the symlinks
100 if [ -n "${IMAGE_LINK_NAME}" ]; then
101 for type in ${subimages}; do
102 if [ -e ${IMAGE_NAME}.rootfs.$type ]; then
103 ln -s ${IMAGE_NAME}.rootfs.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
104 fi
105 done
106 fi
107}
108
109def imagetypes_getdepends(d):
110 def adddep(depstr, deps):
111 for i in (depstr or "").split():
112 if i not in deps:
113 deps.append(i)
114
115 deps = []
116 ctypes = d.getVar('COMPRESSIONTYPES', True).split()
117 for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
118 if type == "vmdk" or type == "live":
119 type = "ext3"
120 basetype = type
121 for ctype in ctypes:
122 if type.endswith("." + ctype):
123 basetype = type[:-len("." + ctype)]
124 adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
125 break
126 adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
127
128 depstr = ""
129 for dep in deps:
130 depstr += " " + dep + ":do_populate_sysroot"
131 return depstr
132
133
134XZ_COMPRESSION_LEVEL ?= "-e -6"
135XZ_INTEGRITY_CHECK ?= "crc32"
136XZ_THREADS ?= "-T 0"
137
138IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 -n ${EXTRA_IMAGECMD}"
139IMAGE_CMD_sum.jffs2 = "${IMAGE_CMD_jffs2} && sumtool -i ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 \
140 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.sum.jffs2 -n ${EXTRA_IMAGECMD}"
141
142IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}"
143
144oe_mkext234fs () {
145 fstype=$1
146 extra_imagecmd=""
147
148 if [ $# -gt 1 ]; then
149 shift
150 extra_imagecmd=$@
151 fi
152
153 # Create a sparse image block
154 dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype seek=$ROOTFS_SIZE count=0 bs=1k
155 mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype
156 populate-extfs.sh ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype
157}
158
159IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
160IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
161IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
162
163IMAGE_CMD_btrfs () {
164 mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
165}
166
167IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
168IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
169IMAGE_CMD_tar = "cd ${IMAGE_ROOTFS} && tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar ."
170
171CPIO_TOUCH_INIT () {
172 if [ ! -L ${IMAGE_ROOTFS}/init ]
173 then
174 touch ${IMAGE_ROOTFS}/init
175 fi
176}
177IMAGE_CMD_cpio () {
178 ${CPIO_TOUCH_INIT}
179 cd ${IMAGE_ROOTFS} && (find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
180}
181
182ELF_KERNEL ?= "${STAGING_DIR_HOST}/usr/src/kernel/${KERNEL_IMAGETYPE}"
183ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
184
185IMAGE_CMD_elf () {
186 test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf
187 mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
188}
189
190UBI_VOLNAME ?= "${MACHINE}-rootfs"
191
192IMAGE_CMD_ubi () {
193 echo \[ubifs\] > ubinize.cfg
194 echo mode=ubi >> ubinize.cfg
195 echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs >> ubinize.cfg
196 echo vol_id=0 >> ubinize.cfg
197 echo vol_type=dynamic >> ubinize.cfg
198 echo vol_name=${UBI_VOLNAME} >> ubinize.cfg
199 echo vol_flags=autoresize >> ubinize.cfg
200 mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS} && ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg
201}
202IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
203
204EXTRA_IMAGECMD = ""
205
206inherit siteinfo
207JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '--little-endian', '--big-endian', d)}"
208JFFS2_ERASEBLOCK ?= "0x40000"
209EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
210
211# Change these if you want default mkfs behavior (i.e. create minimal inode number)
212EXTRA_IMAGECMD_ext2 ?= "-i 8192"
213EXTRA_IMAGECMD_ext3 ?= "-i 8192"
214EXTRA_IMAGECMD_ext4 ?= "-i 8192"
215EXTRA_IMAGECMD_btrfs ?= ""
216EXTRA_IMAGECMD_elf ?= ""
217
218IMAGE_DEPENDS = ""
219IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
220IMAGE_DEPENDS_sum.jffs2 = "mtd-utils-native"
221IMAGE_DEPENDS_cramfs = "util-linux-native"
222IMAGE_DEPENDS_ext2 = "e2fsprogs-native"
223IMAGE_DEPENDS_ext3 = "e2fsprogs-native"
224IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
225IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
226IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
227IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
228IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
229IMAGE_DEPENDS_ubi = "mtd-utils-native"
230IMAGE_DEPENDS_ubifs = "mtd-utils-native"
231
232# This variable is available to request which values are suitable for IMAGE_FSTYPES
233IMAGE_TYPES = "jffs2 sum.jffs2 cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs live squashfs squashfs-xz ubi ubifs tar tar.gz tar.bz2 tar.xz cpio cpio.gz cpio.xz cpio.lzma vmdk elf"
234
235COMPRESSIONTYPES = "gz bz2 lzma xz"
236COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
237COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.gz"
238COMPRESS_CMD_bz2 = "bzip2 -f -k ${IMAGE_NAME}.rootfs.${type}"
239COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.xz"
240COMPRESS_DEPENDS_lzma = "xz-native"
241COMPRESS_DEPENDS_gz = ""
242COMPRESS_DEPENDS_bz2 = ""
243COMPRESS_DEPENDS_xz = "xz-native"
244
245RUNNABLE_IMAGE_TYPES ?= "ext2 ext3"
246RUNNABLE_MACHINE_PATTERNS ?= "qemu"
247
248DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
249
250# Use IMAGE_EXTENSION_xxx to map image type 'xxx' with real image file extension name(s) for Hob
251IMAGE_EXTENSION_live = "hddimg iso"
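A layer can register additional types by following the same pattern: an IMAGE_CMD, its native tool dependency, any EXTRA_IMAGECMD default, and an entry in IMAGE_TYPES. A sketch for a hypothetical 'myfs' filesystem (mkfs.myfs and myfs-tools-native are assumptions, not real tools):

    # hypothetical custom image type
    IMAGE_CMD_myfs = "mkfs.myfs ${EXTRA_IMAGECMD} ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.myfs"
    EXTRA_IMAGECMD_myfs ?= ""
    IMAGE_DEPENDS_myfs = "myfs-tools-native"
    IMAGE_TYPES += "myfs"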
diff --git a/meta/classes/image_types_uboot.bbclass b/meta/classes/image_types_uboot.bbclass
new file mode 100644
index 0000000000..07837b566c
--- /dev/null
+++ b/meta/classes/image_types_uboot.bbclass
@@ -0,0 +1,23 @@
1inherit image_types kernel-arch
2
3oe_mkimage () {
4 mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
5 -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
6}
7
8COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
9
10COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native"
11COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
12
13COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
14COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip"
15
16COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
17COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2"
18
19COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
20COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma"
21
22IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot"
23
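Selecting one of these combined types works like any other entry, e.g. in an image recipe or local.conf:

    # ext2 rootfs, gzip-compressed, wrapped in a u-boot ramdisk header
    IMAGE_FSTYPES = "ext2.gz.u-boot"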
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
new file mode 100644
index 0000000000..a784aff3a9
--- /dev/null
+++ b/meta/classes/insane.bbclass
@@ -0,0 +1,954 @@
1# BB Class inspired by ebuild.sh
2#
3# This class will test files after installation for certain
4# security issues and other kinds of issues.
5#
6# Checks we do:
7# -Check the ownership and permissions
8# -Check the RUNTIME path for the $TMPDIR
9# -Check if .la files wrongly point to workdir
10# -Check if .pc files wrongly point to workdir
11# -Check if packages contain .debug directories or .so files
12#  that should be in -dev or -dbg
13# -Check if config.log contains traces to broken autoconf tests
14# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
15# into exec_prefix
16# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
17# files under exec_prefix
18
19
20PACKAGE_DEPENDS += "${QADEPENDS}"
21PACKAGEFUNCS += " do_package_qa "
22
23# unsafe-references-in-binaries requires prelink-rtld from
24# prelink-native, but we don't want this DEPENDS for -native builds
25QADEPENDS = "prelink-native"
26QADEPENDS_class-native = ""
27QADEPENDS_class-nativesdk = ""
28QA_SANE = "True"
29
30# Select whether a given type of error is a warning or an error; they may
31# have been set by other files.
32WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
33 textrel already-stripped incompatible-license files-invalid \
34 installed-vs-shipped compile-host-path install-host-path \
35 pn-overrides infodir \
36 "
37ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
38 perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
39 split-strip packages-list pkgv-undefined var-undefined \
40 version-going-backwards \
41 "
42
43ALL_QA = "${WARN_QA} ${ERROR_QA}"
44
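Because package_qa_handle_error() below consults ERROR_QA before WARN_QA, a distro or local.conf can promote a warning to a hard failure just by appending it; the check name here is one of those listed above:

    # Promote useless-rpaths from a warning to an error
    ERROR_QA_append = " useless-rpaths"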
45#
46# dictionary for elf headers
47#
48# feel free to add and correct.
49#
50# TARGET_OS TARGET_ARCH : (MACHINE, OSABI, ABIVERSION, Little Endian?, 32/64 bit)
51def package_qa_get_machine_dict():
52 return {
53 "darwin9" : {
54 "arm" : (40, 0, 0, True, 32),
55 },
56 "linux" : {
57 "aarch64" : (183, 0, 0, True, 64),
58 "arm" : (40, 97, 0, True, 32),
59 "armeb": (40, 97, 0, False, 32),
60 "powerpc": (20, 0, 0, False, 32),
61 "powerpc64": (21, 0, 0, False, 64),
62 "i386": ( 3, 0, 0, True, 32),
63 "i486": ( 3, 0, 0, True, 32),
64 "i586": ( 3, 0, 0, True, 32),
65 "i686": ( 3, 0, 0, True, 32),
66 "x86_64": (62, 0, 0, True, 64),
67 "ia64": (50, 0, 0, True, 64),
68 "alpha": (36902, 0, 0, True, 64),
69 "hppa": (15, 3, 0, False, 32),
70 "m68k": ( 4, 0, 0, False, 32),
71 "mips": ( 8, 0, 0, False, 32),
72 "mipsel": ( 8, 0, 0, True, 32),
73 "mips64": ( 8, 0, 0, False, 64),
74 "mips64el": ( 8, 0, 0, True, 64),
75 "s390": (22, 0, 0, False, 32),
76 "sh4": (42, 0, 0, True, 32),
77 "sparc": ( 2, 0, 0, False, 32),
78 "microblaze": (189, 0, 0, False, 32),
79 "microblazeel":(189, 0, 0, True, 32),
80 },
81 "linux-uclibc" : {
82 "arm" : ( 40, 97, 0, True, 32),
83 "armeb": ( 40, 97, 0, False, 32),
84 "powerpc": ( 20, 0, 0, False, 32),
85 "i386": ( 3, 0, 0, True, 32),
86 "i486": ( 3, 0, 0, True, 32),
87 "i586": ( 3, 0, 0, True, 32),
88 "i686": ( 3, 0, 0, True, 32),
89 "x86_64": ( 62, 0, 0, True, 64),
90 "mips": ( 8, 0, 0, False, 32),
91 "mipsel": ( 8, 0, 0, True, 32),
92 "mips64": ( 8, 0, 0, False, 64),
93 "mips64el": ( 8, 0, 0, True, 64),
94 "avr32": (6317, 0, 0, False, 32),
95 "sh4": (42, 0, 0, True, 32),
96
97 },
98 "uclinux-uclibc" : {
99 "bfin": ( 106, 0, 0, True, 32),
100 },
101 "linux-gnueabi" : {
102 "arm" : (40, 0, 0, True, 32),
103 "armeb" : (40, 0, 0, False, 32),
104 },
105 "linux-uclibceabi" : {
106 "arm" : (40, 0, 0, True, 32),
107 "armeb" : (40, 0, 0, False, 32),
108 },
109 "linux-gnuspe" : {
110 "powerpc": (20, 0, 0, False, 32),
111 },
112 "linux-uclibcspe" : {
113 "powerpc": (20, 0, 0, False, 32),
114 },
115 "linux-gnu" : {
116 "powerpc": (20, 0, 0, False, 32),
117 "sh4": (42, 0, 0, True, 32),
118 },
119 "linux-gnux32" : {
120 "x86_64": (62, 0, 0, True, 32),
121 },
122 "linux-gnun32" : {
123 "mips64": ( 8, 0, 0, False, 32),
124 "mips64el": ( 8, 0, 0, True, 32),
125 },
126 }
127
128
129def package_qa_clean_path(path,d):
130 """ Remove the common prefix from the path. In this case it is the TMPDIR"""
131 return path.replace(d.getVar('TMPDIR',True),"")
132
133def package_qa_write_error(error, d):
134 logfile = d.getVar('QA_LOGFILE', True)
135 if logfile:
136 p = d.getVar('P', True)
137 f = file( logfile, "a+")
138 print >> f, "%s: %s" % (p, error)
139 f.close()
140
141def package_qa_handle_error(error_class, error_msg, d):
142 package_qa_write_error(error_msg, d)
143 if error_class in (d.getVar("ERROR_QA", True) or "").split():
144 bb.error("QA Issue: %s" % error_msg)
145 d.setVar("QA_SANE", False)
146 return False
147 elif error_class in (d.getVar("WARN_QA", True) or "").split():
148 bb.warn("QA Issue: %s" % error_msg)
149 else:
150 bb.note("QA Issue: %s" % error_msg)
151 return True
152
153QAPATHTEST[libexec] = "package_qa_check_libexec"
154def package_qa_check_libexec(path,name, d, elf, messages):
155
156 # Skip the case where the default is explicitly /usr/libexec
157 libexec = d.getVar('libexecdir', True)
158 if libexec == "/usr/libexec":
159 return True
160
161 if 'libexec' in path.split(os.path.sep):
162 messages.append("%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
163 return False
164
165 return True
166
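# Each QAPATHTEST[<name>] flag registers a per-file check; do_package_qa below
# collects them via d.getVarFlags("QAPATHTEST") and runs the named function on
# every packaged file for each test enabled in WARN_QA/ERROR_QA. A minimal
# sketch of adding a new check (hypothetical name):
#
#   QAPATHTEST[my-check] = "package_qa_check_my_check"
#   def package_qa_check_my_check(path, name, d, elf, messages):
#       if path.endswith(".orig"):
#           messages.append("%s: stray .orig file %s" % (name, package_qa_clean_path(path, d)))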
167QAPATHTEST[rpaths] = "package_qa_check_rpath"
168def package_qa_check_rpath(file,name, d, elf, messages):
169 """
170 Check for dangerous RPATHs
171 """
172 if not elf:
173 return
174
175 if os.path.islink(file):
176 return
177
178 bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)]
179
180 phdrs = elf.run_objdump("-p", d)
181
182 import re
183 rpath_re = re.compile("\s+RPATH\s+(.*)")
184 for line in phdrs.split("\n"):
185 m = rpath_re.match(line)
186 if m:
187 rpath = m.group(1)
188 for dir in bad_dirs:
189 if dir in rpath:
190 messages.append("package %s contains bad RPATH %s in file %s" % (name, rpath, file))
191
192QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
193def package_qa_check_useless_rpaths(file, name, d, elf, messages):
194 """
195 Check for RPATHs that are useless but not dangerous
196 """
197 def rpath_eq(a, b):
198 return os.path.normpath(a) == os.path.normpath(b)
199
200 if not elf:
201 return
202
203 if os.path.islink(file):
204 return
205
206 libdir = d.getVar("libdir", True)
207 base_libdir = d.getVar("base_libdir", True)
208
209 phdrs = elf.run_objdump("-p", d)
210
211 import re
212 rpath_re = re.compile("\s+RPATH\s+(.*)")
213 for line in phdrs.split("\n"):
214 m = rpath_re.match(line)
215 if m:
216 rpath = m.group(1)
217 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
218 # The dynamic linker searches both these places anyway. There is no point in
219 # looking there again.
220 messages.append("%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
221
222QAPATHTEST[dev-so] = "package_qa_check_dev"
223def package_qa_check_dev(path, name, d, elf, messages):
224 """
225 Check for ".so" library symlinks in non-dev packages
226 """
227
228 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
229 messages.append("non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
230 (name, package_qa_clean_path(path,d)))
231
232QAPATHTEST[staticdev] = "package_qa_check_staticdev"
233def package_qa_check_staticdev(path, name, d, elf, messages):
234 """
235 Check for ".a" library in non-staticdev packages
236 There are a number of exceptions to this rule, -pic packages can contain
237 static libraries, the _nonshared.a belong with their -dev packages and
238 libgcc.a, libgcov.a will be skipped in their packages
239 """
240
241 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
242 messages.append("non -staticdev package contains static .a library: %s path '%s'" % \
243 (name, package_qa_clean_path(path,d)))
244
245def package_qa_check_libdir(d):
246 """
247 Check for wrong library installation paths. For instance, catch
248 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
249 installing in /usr/lib64 when ${libdir}="/usr/lib"
250 """
251 import re
252
253 pkgdest = d.getVar('PKGDEST', True)
254 base_libdir = d.getVar("base_libdir",True) + os.sep
255 libdir = d.getVar("libdir", True) + os.sep
256 exec_prefix = d.getVar("exec_prefix", True) + os.sep
257
258 messages = []
259
260 lib_re = re.compile("^/lib.+\.so(\..+)?$")
261 exec_re = re.compile("^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
262
263 for root, dirs, files in os.walk(pkgdest):
264 if root == pkgdest:
265 # Skip subdirectories for any packages with libdir in INSANE_SKIP
266 skippackages = []
267 for package in dirs:
268 if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
269 bb.note("Package %s skipping libdir QA test" % (package))
270 skippackages.append(package)
271 for package in skippackages:
272 dirs.remove(package)
273 for file in files:
274 full_path = os.path.join(root, file)
275 rel_path = os.path.relpath(full_path, pkgdest)
276 if os.sep in rel_path:
277 package, rel_path = rel_path.split(os.sep, 1)
278 rel_path = os.sep + rel_path
279 if lib_re.match(rel_path):
280 if base_libdir not in rel_path:
281 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
282 if exec_re.match(rel_path):
283 if libdir not in rel_path:
284 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
285
286 if messages:
287 package_qa_handle_error("libdir", "\n".join(messages), d)
288
289QAPATHTEST[debug-files] = "package_qa_check_dbg"
290def package_qa_check_dbg(path, name, d, elf, messages):
291 """
292 Check for ".debug" files or directories outside of the dbg package
293 """
294
295 if not "-dbg" in name and not "-ptest" in name:
296 if '.debug' in path.split(os.path.sep):
297 messages.append("non debug package contains .debug directory: %s path %s" % \
298 (name, package_qa_clean_path(path,d)))
299
300QAPATHTEST[perms] = "package_qa_check_perm"
301def package_qa_check_perm(path,name,d, elf, messages):
302 """
303 Check the permission of files
304 """
305 return
306
307QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
308def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
309 """
310 Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
311 """
312 if unsafe_references_skippable(path, name, d):
313 return
314
315 if elf:
316 import subprocess as sub
317 pn = d.getVar('PN', True)
318
319 exec_prefix = d.getVar('exec_prefix', True)
320 sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
321 sysroot_path_usr = sysroot_path + exec_prefix
322
323 try:
324 ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
325 except bb.process.CmdError:
326 error_msg = pn + ": prelink-rtld aborted when processing %s" % path
327 package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
328 return False
329
330 if sysroot_path_usr in ldd_output:
331 ldd_output = ldd_output.replace(sysroot_path, "")
332
333 pkgdest = d.getVar('PKGDEST', True)
334 packages = d.getVar('PACKAGES', True)
335
336 for package in packages.split():
337 short_path = path.replace('%s/%s' % (pkgdest, package), "", 1)
338 if (short_path != path):
339 break
340
341 base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix)
342 for line in ldd_output.split('\n'):
343 if exec_prefix in line:
344 error_msg = "%s: %s" % (base_err, line.strip())
345 package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
346
347 return False
348
349QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
350def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
351 """
352 Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
353 """
354 if unsafe_references_skippable(path, name, d):
355 return
356
357 if not elf:
358 import stat
359 import subprocess
360 pn = d.getVar('PN', True)
361
362 # Ensure we're checking an executable script
363 statinfo = os.stat(path)
364 if bool(statinfo.st_mode & stat.S_IXUSR):
365 # grep shell scripts for possible references to /exec_prefix/
366 exec_prefix = d.getVar('exec_prefix', True)
367 statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path)
368 if subprocess.call(statement, shell=True) == 0:
369 error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
370 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
371 error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
372 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
373
374def unsafe_references_skippable(path, name, d):
375 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
376 return True
377
378 if "-dbg" in name or "-dev" in name:
379 return True
380
381 # Other package names to skip:
382 if name.startswith("kernel-module-"):
383 return True
384
385 # Skip symlinks
386 if os.path.islink(path):
387 return True
388
389 # Skip unusual rootfs layouts which make these tests irrelevant
390 exec_prefix = d.getVar('exec_prefix', True)
391 if exec_prefix == "":
392 return True
393
394 pkgdest = d.getVar('PKGDEST', True)
395 pkgdest = pkgdest + "/" + name
396 pkgdest = os.path.abspath(pkgdest)
397 base_bindir = pkgdest + d.getVar('base_bindir', True)
398 base_sbindir = pkgdest + d.getVar('base_sbindir', True)
399 base_libdir = pkgdest + d.getVar('base_libdir', True)
400 bindir = pkgdest + d.getVar('bindir', True)
401 sbindir = pkgdest + d.getVar('sbindir', True)
402 libdir = pkgdest + d.getVar('libdir', True)
403
404 if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
405 return True
406
407 # Skip files not in base_[bindir|sbindir|libdir]
408 path = os.path.abspath(path)
409 if not (base_bindir in path or base_sbindir in path or base_libdir in path):
410 return True
411
412 return False
413
414QAPATHTEST[arch] = "package_qa_check_arch"
415def package_qa_check_arch(path,name,d, elf, messages):
416 """
417 Check if archs are compatible
418 """
419 if not elf:
420 return
421
422 target_os = d.getVar('TARGET_OS', True)
423 target_arch = d.getVar('TARGET_ARCH', True)
424 provides = d.getVar('PROVIDES', True)
425 bpn = d.getVar('BPN', True)
426
427 # FIXME: Cross packages confuse this check, so just skip them
428 for s in ['cross', 'nativesdk', 'cross-canadian']:
429 if bb.data.inherits_class(s, d):
430 return
431
432 # avoid following links to /usr/bin (e.g. on udev builds)
433 # we will check the files pointed to anyway...
434 if os.path.islink(path):
435 return
436
437 # if this throws an exception, fix the dict above
438 (machine, osabi, abiversion, littleendian, bits) \
439 = package_qa_get_machine_dict()[target_os][target_arch]
440
441 # Check the architecture and endianness of the binary
442 if not ((machine == elf.machine()) or \
443 ("virtual/kernel" in provides) and (target_os == "linux-gnux32")):
444 messages.append("Architecture did not match (%d to %d) on %s" % \
445 (machine, elf.machine(), package_qa_clean_path(path,d)))
446 elif not ((bits == elf.abiSize()) or \
447 ("virtual/kernel" in provides) and (target_os == "linux-gnux32")):
448 messages.append("Bit size did not match (%d to %d) %s on %s" % \
449 (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
450 elif not littleendian == elf.isLittleEndian():
451 messages.append("Endiannes did not match (%d to %d) on %s" % \
452 (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
453
454QAPATHTEST[desktop] = "package_qa_check_desktop"
455def package_qa_check_desktop(path, name, d, elf, messages):
456 """
457 Run all desktop files through desktop-file-validate.
458 """
459 if path.endswith(".desktop"):
460 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
461 output = os.popen("%s %s" % (desktop_file_validate, path))
462 # This only produces output on errors
463 for l in output:
464 messages.append("Desktop file issue: " + l.strip())
465
466QAPATHTEST[textrel] = "package_qa_textrel"
467def package_qa_textrel(path, name, d, elf, messages):
468 """
469 Check if the binary contains relocations in .text
470 """
471
472 if not elf:
473 return
474
475 if os.path.islink(path):
476 return
477
478 phdrs = elf.run_objdump("-p", d)
479 sane = True
480
481 import re
482 textrel_re = re.compile("\s+TEXTREL\s+")
483 for line in phdrs.split("\n"):
484 if textrel_re.match(line):
485 sane = False
486
487 if not sane:
488 messages.append("ELF binary '%s' has relocations in .text" % path)
489
490QAPATHTEST[ldflags] = "package_qa_hash_style"
491def package_qa_hash_style(path, name, d, elf, messages):
492 """
493 Check if the binary has the right hash style...
494 """
495
496 if not elf:
497 return
498
499 if os.path.islink(path):
500 return
501
502 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
503 if not gnu_hash:
504 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
505 if not gnu_hash:
506 return
507
508 sane = False
509 has_syms = False
510
511 phdrs = elf.run_objdump("-p", d)
512
513 # If this binary has symbols, we expect it to have GNU_HASH too.
514 for line in phdrs.split("\n"):
515 if "SYMTAB" in line:
516 has_syms = True
517 if "GNU_HASH" in line:
518 sane = True
519 if "[mips32]" in line or "[mips64]" in line:
520 sane = True
521
522 if has_syms and not sane:
523 messages.append("No GNU_HASH in the elf binary: '%s'" % path)
524
525
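# Illustrative trigger for the ldflags test above (a sketch): the check only
# activates when the linker is passed a GNU hash style, e.g. via
#
#   LDFLAGS += "-Wl,--hash-style=gnu"
#
# after which any binary with a SYMTAB but no GNU_HASH entry is reported.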
526QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
527def package_qa_check_buildpaths(path, name, d, elf, messages):
528 """
529 Check for build paths (TMPDIR references) inside target files
530 """
531 # Ignore .debug files, not interesting
532 if path.find(".debug") != -1:
533 return
534
535 # Ignore symlinks
536 if os.path.islink(path):
537 return
538
539 tmpdir = d.getVar('TMPDIR', True)
540 with open(path) as f:
541 file_content = f.read()
542 if tmpdir in file_content:
543 messages.append("File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
544
545
546QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
547def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
548 """
549 Check that all packages containing Xorg drivers have ABI dependencies
550 """
551
552 # Skip dev, dbg or nativesdk packages
553 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
554 return
555
556 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
557 if driverdir in path and path.endswith(".so"):
558 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
559 if rdep.startswith("xorg-abi-"):
560 return
561 messages.append("Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
562
563QAPATHTEST[infodir] = "package_qa_check_infodir"
564def package_qa_check_infodir(path, name, d, elf, messages):
565 """
566 Check that /usr/share/info/dir isn't shipped in a particular package
567 """
568 infodir = d.expand("${infodir}/dir")
569
570 if infodir in path:
571 messages.append("The /usr/share/info/dir file is not meant to be shipped in a particular package.")
572
573def package_qa_check_license(workdir, d):
574 """
575 Check for changes in the license files
576 """
577 import tempfile
578 sane = True
579
580 lic_files = d.getVar('LIC_FILES_CHKSUM', True)
581 lic = d.getVar('LICENSE', True)
582 pn = d.getVar('PN', True)
583
584 if lic == "CLOSED":
585 return True
586
587 if not lic_files:
588 bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
589 return False
590
591 srcdir = d.getVar('S', True)
592
593 for url in lic_files.split():
594 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
595 srclicfile = os.path.join(srcdir, path)
596 if not os.path.isfile(srclicfile):
597 raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile)
598
599 if 'md5' not in parm:
600 bb.error(pn + ": md5 checksum is not specified for ", url)
601 return False
602 beginline, endline = 0, 0
603 if 'beginline' in parm:
604 beginline = int(parm['beginline'])
605 if 'endline' in parm:
606 endline = int(parm['endline'])
607
608 if (not beginline) and (not endline):
609 md5chksum = bb.utils.md5_file(srclicfile)
610 else:
611 fi = open(srclicfile, 'rb')
612 fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
613 tmplicfile = fo.name;
614 lineno = 0
615 linesout = 0
616 for line in fi:
617 lineno += 1
618 if (lineno >= beginline):
619 if ((lineno <= endline) or not endline):
620 fo.write(line)
621 linesout += 1
622 else:
623 break
624 fo.flush()
625 fo.close()
626 fi.close()
627 md5chksum = bb.utils.md5_file(tmplicfile)
628 os.unlink(tmplicfile)
629
630 if parm['md5'] == md5chksum:
631 bb.note (pn + ": md5 checksum matched for ", url)
632 else:
633 bb.error (pn + ": md5 data is not matching for ", url)
634 bb.error (pn + ": The new md5 checksum is ", md5chksum)
635 bb.error (pn + ": Check if the license information has changed in")
636 sane = False
637
638 return sane
639
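# Illustrative recipe usage of the parameters parsed above (a sketch; the
# checksum value is a placeholder):
#
#   LIC_FILES_CHKSUM = "file://COPYING;md5=<md5sum>;beginline=1;endline=20"
#
# With beginline/endline set, only that line range of COPYING is hashed and
# compared against the md5 parameter; otherwise the whole file is hashed.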
640def package_qa_check_staged(path,d):
641 """
642 Check staged la and pc files for sanity
643 e.g. installed being false
644
645 As this is run after every stage, we should be able
646 to find the one responsible for the errors easily even
647 if we look at every .pc and .la file
648 """
649
650 sane = True
651 tmpdir = d.getVar('TMPDIR', True)
652 workdir = os.path.join(tmpdir, "work")
653
654 installed = "installed=yes"
655 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
656 pkgconfigcheck = workdir
657 else:
658 pkgconfigcheck = tmpdir
659
660 # find all .la and .pc files
661 # read the content
662 # and check for stuff that looks wrong
663 for root, dirs, files in os.walk(path):
664 for file in files:
665 path = os.path.join(root,file)
666 if file.endswith(".la"):
667 with open(path) as f:
668 file_content = f.read()
669 if workdir in file_content:
670 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
671 sane = package_qa_handle_error("la", error_msg, d)
672 elif file.endswith(".pc"):
673 with open(path) as f:
674 file_content = f.read()
675 if pkgconfigcheck in file_content:
676 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
677 sane = package_qa_handle_error("pkgconfig", error_msg, d)
678
679 return sane
680
681# Walk over all files in each package and call the supplied warn/error check functions
682def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
683 import oe.qa
684
685 # if this throws an exception, fix the dict above
686 target_os = d.getVar('TARGET_OS', True)
687 target_arch = d.getVar('TARGET_ARCH', True)
688
689 warnings = []
690 errors = []
691 for path in pkgfiles[package]:
692 elf = oe.qa.ELFFile(path)
693 try:
694 elf.open()
695 except:
696 elf = None
697 for func in warnfuncs:
698 func(path, package, d, elf, warnings)
699 for func in errorfuncs:
700 func(path, package, d, elf, errors)
701
702 for w in warnings:
703 bb.warn("QA Issue: %s" % w)
704 package_qa_write_error(w, d)
705 for e in errors:
706 bb.error("QA Issue: %s" % e)
707 package_qa_write_error(e, d)
708
709 return len(errors) == 0
710
711def package_qa_check_rdepends(pkg, pkgdest, skip, d):
712 # Don't do this check for kernel/module recipes, there aren't too many debug/development
713 # packages and you can get false positives e.g. on kernel-module-lirc-dev
714 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
715 return True
716
717 sane = True
718 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
719 localdata = bb.data.createCopy(d)
720 localdata.setVar('OVERRIDES', pkg)
721 bb.data.update_data(localdata)
722
723 # Now check the RDEPENDS
724 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
725
726 # Now do the sanity check!!!
727 for rdepend in rdepends:
728 if "-dbg" in rdepend and "debug-deps" not in skip:
729 error_msg = "%s rdepends on %s" % (pkg,rdepend)
730 sane = package_qa_handle_error("debug-deps", error_msg, d)
731 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
732 error_msg = "%s rdepends on %s" % (pkg, rdepend)
733 sane = package_qa_handle_error("dev-deps", error_msg, d)
734
735 return sane
736
737def package_qa_check_deps(pkg, pkgdest, skip, d):
738 sane = True
739
740 localdata = bb.data.createCopy(d)
741 localdata.setVar('OVERRIDES', pkg)
742 bb.data.update_data(localdata)
743
744 def check_valid_deps(var):
745 sane = True
746 try:
747 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
748 except ValueError as e:
749 bb.fatal("%s_%s: %s" % (var, pkg, e))
750 for dep in rvar:
751 for v in rvar[dep]:
752 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
753 error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
754 sane = package_qa_handle_error("dep-cmp", error_msg, d)
755 return sane
756
757 sane = True
758 if not check_valid_deps('RDEPENDS'):
759 sane = False
760 if not check_valid_deps('RRECOMMENDS'):
761 sane = False
762 if not check_valid_deps('RSUGGESTS'):
763 sane = False
764 if not check_valid_deps('RPROVIDES'):
765 sane = False
766 if not check_valid_deps('RREPLACES'):
767 sane = False
768 if not check_valid_deps('RCONFLICTS'):
769 sane = False
770
771 return sane
772
773# The PACKAGE FUNC to scan each package
774python do_package_qa () {
775 import subprocess
776
777 bb.note("DO PACKAGE QA")
778
779 logdir = d.getVar('T', True)
780 pkg = d.getVar('PN', True)
781
782 # Check the compile log for host contamination
783 compilelog = os.path.join(logdir,"log.do_compile")
784
785 if os.path.exists(compilelog):
786 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
787 if subprocess.call(statement, shell=True) == 0:
788 msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
789 Please check the log '%s' for more information." % (pkg, compilelog)
790 package_qa_handle_error("compile-host-path", msg, d)
791
792 # Check the install log for host contamination
793 installlog = os.path.join(logdir,"log.do_install")
794
795 if os.path.exists(installlog):
796 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
797 if subprocess.call(statement, shell=True) == 0:
798 msg = "%s: The install log indicates that host include and/or library paths were used.\n \
799 Please check the log '%s' for more information." % (pkg, installlog)
800 package_qa_handle_error("install-host-path", msg, d)
801
802 # Scan the packages...
803 pkgdest = d.getVar('PKGDEST', True)
804 packages = d.getVar('PACKAGES', True)
805
806 # if there are no packages, there is nothing to scan
807 if not packages:
808 return
809
810 testmatrix = d.getVarFlags("QAPATHTEST")
811 import re
812 # The package name must match the [a-z0-9.+-]+ regular expression
813 pkgname_pattern = re.compile("^[a-z0-9.+-]+$")
814
815 g = globals()
816 walk_sane = True
817 rdepends_sane = True
818 deps_sane = True
819 for package in packages.split():
820 skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
821 if skip:
822 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
823 warnchecks = []
824 for w in (d.getVar("WARN_QA", True) or "").split():
825 if w in skip:
826 continue
827 if w in testmatrix and testmatrix[w] in g:
828 warnchecks.append(g[testmatrix[w]])
829 errorchecks = []
830 for e in (d.getVar("ERROR_QA", True) or "").split():
831 if e in skip:
832 continue
833 if e in testmatrix and testmatrix[e] in g:
834 errorchecks.append(g[testmatrix[e]])
835
836 bb.note("Checking Package: %s" % package)
837 # Check package name
838 if not pkgname_pattern.match(package):
839 package_qa_handle_error("pkgname",
840 "%s doesn't match the [a-z0-9.+-]+ regex\n" % package, d)
841
842 path = "%s/%s" % (pkgdest, package)
843 if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
844 walk_sane = False
845 if not package_qa_check_rdepends(package, pkgdest, skip, d):
846 rdepends_sane = False
847 if not package_qa_check_deps(package, pkgdest, skip, d):
848 deps_sane = False
849
850
851 if 'libdir' in d.getVar("ALL_QA", True).split():
852 package_qa_check_libdir(d)
853
854 qa_sane = d.getVar("QA_SANE", True)
855 if not walk_sane or not rdepends_sane or not deps_sane or not qa_sane:
856 bb.fatal("QA run found fatal errors. Please consider fixing them.")
857 bb.note("DONE with PACKAGE QA")
858}
859
860
861python do_qa_staging() {
862 bb.note("QA checking staging")
863
864 if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}/${STAGING_LIBDIR}'), d):
865 bb.fatal("QA staging was broken by the package built above")
866}
867
868python do_qa_configure() {
869 import subprocess
870
871 ###########################################################################
872 # Check config.log for cross compile issues
873 ###########################################################################
874
875 configs = []
876 workdir = d.getVar('WORKDIR', True)
877 bb.note("Checking autotools environment for common misconfiguration")
878 for root, dirs, files in os.walk(workdir):
879 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \
880 os.path.join(root,"config.log")
881 if "config.log" in files:
882 if subprocess.call(statement, shell=True) == 0:
883 bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
884Rerun configure task after fixing this. The path was '%s'""" % root)
885
886 if "configure.ac" in files:
887 configs.append(os.path.join(root,"configure.ac"))
888 if "configure.in" in files:
889 configs.append(os.path.join(root, "configure.in"))
890
891 ###########################################################################
892 # Check gettext configuration and dependencies are correct
893 ###########################################################################
894
895 cnf = d.getVar('EXTRA_OECONF', True) or ""
896 if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
897 ml = d.getVar("MLPREFIX", True) or ""
898 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
899 gt = "gettext-native"
900 elif bb.data.inherits_class('cross-canadian', d):
901 gt = "nativesdk-gettext"
902 else:
903 gt = "virtual/" + ml + "gettext"
904 deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
905 if gt not in deps:
906 for config in configs:
907 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
908 if subprocess.call(gnu, shell=True) == 0:
909 bb.fatal("""%s required but not in DEPENDS for file %s.
910Missing inherit gettext?""" % (gt, config))
911
912 ###########################################################################
913 # Check license variables
914 ###########################################################################
915
916 if not package_qa_check_license(workdir, d):
917 bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
918
919}
920# The Staging Func, to check all staging
921#addtask qa_staging after do_populate_sysroot before do_build
922do_populate_sysroot[postfuncs] += "do_qa_staging "
923
924# Check for broken config.log files, for packages requiring gettext that don't
925# have it in DEPENDS, and for correct LIC_FILES_CHKSUM
926#addtask qa_configure after do_configure before do_compile
927do_configure[postfuncs] += "do_qa_configure "
928
929python () {
930 tests = d.getVar('ALL_QA', True).split()
931 if "desktop" in tests:
932 d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native")
933
934 ###########################################################################
935 # Check various variables
936 ###########################################################################
937
938 if d.getVar('do_stage', True) is not None:
939 bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True))
940
941 overrides = d.getVar('OVERRIDES', True).split(':')
942 pn = d.getVar('PN', True)
943 if pn in overrides:
944 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn)
945 package_qa_handle_error("pn-overrides", msg, d)
946
947 issues = []
948 if (d.getVar('PACKAGES', True) or "").split():
949 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
950 if d.getVar(var):
951 issues.append(var)
952 for i in issues:
953 package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
954}
diff --git a/meta/classes/insserv.bbclass b/meta/classes/insserv.bbclass
new file mode 100644
index 0000000000..14290a77e2
--- /dev/null
+++ b/meta/classes/insserv.bbclass
@@ -0,0 +1,5 @@
1do_rootfs[depends] += "insserv-native:do_populate_sysroot"
2run_insserv () {
3 insserv -p ${IMAGE_ROOTFS}/etc/init.d -c ${STAGING_ETCDIR_NATIVE}/insserv.conf
4}
5ROOTFS_POSTPROCESS_COMMAND += " run_insserv ; "
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
new file mode 100644
index 0000000000..4a140ebdaf
--- /dev/null
+++ b/meta/classes/kernel-arch.bbclass
@@ -0,0 +1,58 @@
1#
2# set the ARCH environment variable for kernel compilation (including
3# modules). return value must match one of the architecture directories
4# in the kernel source "arch" directory
5#
6
7valid_archs = "alpha cris ia64 \
8 i386 x86 \
9 m68knommu m68k ppc powerpc powerpc64 ppc64 \
10 sparc sparc64 \
11 arm aarch64 \
12 m32r mips \
13 sh sh64 um h8300 \
14 parisc s390 v850 \
15 avr32 blackfin \
16 microblaze"
17
18def map_kernel_arch(a, d):
19 import re
20
21 valid_archs = d.getVar('valid_archs', True).split()
22
23 if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
24 elif re.match('armeb$', a): return 'arm'
25 elif re.match('aarch64$', a): return 'arm64'
26 elif re.match('mips(el|64|64el)$', a): return 'mips'
27 elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
28 elif re.match('sh(3|4)$', a): return 'sh'
29 elif re.match('bfin', a): return 'blackfin'
30 elif re.match('microblazeel', a): return 'microblaze'
31 elif a in valid_archs: return a
32 else:
33 bb.error("cannot map '%s' to a linux kernel architecture" % a)
34
35export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
36
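# Illustrative mappings implied by the regexes above (a sketch):
#   map_kernel_arch("i686", d)      -> "x86"
#   map_kernel_arch("aarch64", d)   -> "arm64"
#   map_kernel_arch("mips64el", d)  -> "mips"
#   map_kernel_arch("powerpc64", d) -> "powerpc"
#   map_kernel_arch("sparc", d)     -> "sparc" (passed through via valid_archs)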
37def map_uboot_arch(a, d):
38 import re
39
40 if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
41 elif re.match('i.86$', a): return 'x86'
42 return a
43
44export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
45
46# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
47# specific options necessary for building the kernel and modules.
48TARGET_CC_KERNEL_ARCH ?= ""
49HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
50TARGET_LD_KERNEL_ARCH ?= ""
51HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
52TARGET_AR_KERNEL_ARCH ?= ""
53HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
54
55KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH}"
56KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
57KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
58
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
new file mode 100644
index 0000000000..9a4329dcc9
--- /dev/null
+++ b/meta/classes/kernel-module-split.bbclass
@@ -0,0 +1,185 @@
1pkg_postinst_modules () {
2if [ -z "$D" ]; then
3 depmod -a ${KERNEL_VERSION}
4else
5 depmodwrapper -a -b $D ${KERNEL_VERSION}
6fi
7}
8
9pkg_postrm_modules () {
10if [ -z "$D" ]; then
11 depmod -a ${KERNEL_VERSION}
12else
13 depmodwrapper -a -b $D ${KERNEL_VERSION}
14fi
15}
16
17autoload_postinst_fragment() {
18if [ x"$D" = "x" ]; then
19 modprobe %s || true
20fi
21}
22
23do_install_append() {
24 install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
25}
26
27PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
28
29KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
30
31python split_kernel_module_packages () {
32 import re
33
34 modinfoexp = re.compile("([^=]+)=(.*)")
35 kerverrexp = re.compile('^(.*-hh.*)[\.\+].*$')
36 depmodpat0 = re.compile("^(.*\.k?o):..*$")
37 depmodpat1 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*$")
38 depmodpat2 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*\\\$")
39 depmodpat3 = re.compile("^\t(.*\.k?o)\s*\\\$")
40 depmodpat4 = re.compile("^\t(.*\.k?o)\s*$")
41
42 def extract_modinfo(file):
43 import tempfile, subprocess
44 tempfile.tempdir = d.getVar("WORKDIR", True)
45 tf = tempfile.mkstemp()
46 tmpfile = tf[1]
47 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
48 subprocess.call(cmd, shell=True)
49 f = open(tmpfile)
50 l = f.read().split("\000")
51 f.close()
52 os.close(tf[0])
53 os.unlink(tmpfile)
54 vals = {}
55 for i in l:
56 m = modinfoexp.match(i)
57 if not m:
58 continue
59 vals[m.group(1)] = m.group(2)
60 return vals
61
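# Illustrative .modinfo content parsed by extract_modinfo() above (an assumed
# example): NUL-separated key=value pairs such as
#   license=GPL
#   description=Example driver
#   depends=mod-a,mod-b
# which would yield {"license": "GPL", "description": "Example driver", "depends": "mod-a,mod-b"}.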
62 def parse_depmod():
63
64 dvar = d.getVar('PKGD', True)
65
66 kernelver = d.getVar('KERNEL_VERSION', True)
67 kernelver_stripped = kernelver
68 m = kerverrexp.match(kernelver)
69 if m:
70 kernelver_stripped = m.group(1)
71 staging_kernel_dir = d.getVar("STAGING_KERNEL_DIR", True)
72 system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
73 if not os.path.exists(system_map_file):
74 system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
75 if not os.path.exists(system_map_file):
76 bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_DIR '%s'" % (kernelver, dvar, staging_kernel_dir))
77
78 cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
79 f = os.popen(cmd, 'r')
80
81 deps = {}
82 line = f.readline()
83 while line:
84 if not depmodpat0.match(line):
85 line = f.readline()
86 continue
87 m1 = depmodpat1.match(line)
88 if m1:
89 deps[m1.group(1)] = m1.group(2).split()
90 else:
91 m2 = depmodpat2.match(line)
92 if m2:
93 deps[m2.group(1)] = m2.group(2).split()
94 line = f.readline()
95 m3 = depmodpat3.match(line)
96 while m3:
97 deps[m2.group(1)].extend(m3.group(1).split())
98 line = f.readline()
99 m3 = depmodpat3.match(line)
100 m4 = depmodpat4.match(line)
101 if m4: deps[m2.group(1)].extend(m4.group(1).split())
102 line = f.readline()
103 f.close()
104 return deps
105
106 def get_dependencies(file, pattern, format):
107 # file no longer includes PKGD
108 file = file.replace(d.getVar('PKGD', True) or '', '', 1)
109 # instead is prefixed with /lib/modules/${KERNEL_VERSION}
110 file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', True) or '', '', 1)
111
112 if file in module_deps:
113 dependencies = []
114 for i in module_deps[file]:
115 m = re.match(pattern, os.path.basename(i))
116 if not m:
117 continue
118 on = legitimize_package_name(m.group(1))
119 dependency_pkg = format % on
120 dependencies.append(dependency_pkg)
121 return dependencies
122 return []
123
124 def frob_metadata(file, pkg, pattern, format, basename):
125 vals = extract_modinfo(file)
126
127 dvar = d.getVar('PKGD', True)
128
129 # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
130 # appropriate modprobe commands to the postinst
131 autoload = d.getVar('module_autoload_%s' % basename, True)
132 if autoload:
133 name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
134 f = open(name, 'w')
135 for m in autoload.split():
136 f.write('%s\n' % m)
137 f.close()
138 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
139 if not postinst:
140 bb.fatal("pkg_postinst_%s not defined" % pkg)
141 postinst += d.getVar('autoload_postinst_fragment', True) % autoload
142 d.setVar('pkg_postinst_%s' % pkg, postinst)
143
144 # Write out any modconf fragment
145 modconf = d.getVar('module_conf_%s' % basename, True)
146 if modconf:
147 name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
148 f = open(name, 'w')
149 f.write("%s\n" % modconf)
150 f.close()
151
152 files = d.getVar('FILES_%s' % pkg, True)
153 files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
154 d.setVar('FILES_%s' % pkg, files)
155
156 if "description" in vals:
157 old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
158 d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
159
160 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
161 for dep in get_dependencies(file, pattern, format):
162 if not dep in rdepends:
163 rdepends[dep] = []
164 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
165
166 module_deps = parse_depmod()
167 module_regex = '^(.*)\.k?o$'
168 module_pattern = 'kernel-module-%s'
169
170 postinst = d.getVar('pkg_postinst_modules', True)
171 postrm = d.getVar('pkg_postrm_modules', True)
172
173 modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True)))
174 if modules:
175 metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True)
176 d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
177
178 # If modules-load.d and modprobe.d are empty at this point, remove them to
179 # avoid warnings; os.rmdir is only called on directories that are
180 # already empty.
181 dvar = d.getVar('PKGD', True)
182 for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
183 if len(os.listdir(dir)) == 0:
184 os.rmdir(dir)
185}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
new file mode 100644
index 0000000000..8f79932438
--- /dev/null
+++ b/meta/classes/kernel-yocto.bbclass
@@ -0,0 +1,413 @@
1S = "${WORKDIR}/linux"
2
3# remove tasks that modify the source tree in case externalsrc is inherited
4SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch"
5
6# returns local (absolute) path names for all valid patches in the
7# src_uri
8def find_patches(d):
9 patches = src_patches(d)
10 patch_list=[]
11 for p in patches:
12 _, _, local, _, _, _ = bb.fetch.decodeurl(p)
13 patch_list.append(local)
14
15 return patch_list
16
17# returns all the elements from the src uri that are .scc files
18def find_sccs(d):
19 sources=src_patches(d, True)
20 sources_list=[]
21 for s in sources:
22 base, ext = os.path.splitext(os.path.basename(s))
23 if ext and ext in [".scc", ".cfg"]:
24 sources_list.append(s)
25 elif base and base in 'defconfig':
26 sources_list.append(s)
27
28 return sources_list
29
30# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
31# the repository as it will be found in WORKDIR
32def find_kernel_feature_dirs(d):
33 feature_dirs=[]
34 fetch = bb.fetch2.Fetch([], d)
35 for url in fetch.urls:
36 urldata = fetch.ud[url]
37 parm = urldata.parm
38 if "type" in parm:
39 type = parm["type"]
40 if "destsuffix" in parm:
41 destdir = parm["destsuffix"]
42 if type == "kmeta":
43 feature_dirs.append(destdir)
44
45 return feature_dirs
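# Illustrative SRC_URI entry matched above (a sketch; the URL is hypothetical):
#
#   SRC_URI += "git://example.com/yocto-kernel-cache;type=kmeta;destsuffix=yocto-kmeta"
#
# for which find_kernel_feature_dirs() returns ["yocto-kmeta"].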
46
47# find the master/machine source branch. In the same way that the fetcher processes
48# git repositories in the SRC_URI we take the first repo found, first branch.
49def get_machine_branch(d, default):
50 fetch = bb.fetch2.Fetch([], d)
51 for url in fetch.urls:
52 urldata = fetch.ud[url]
53 parm = urldata.parm
54 if "branch" in parm:
55 branches = urldata.parm.get("branch").split(',')
56 return branches[0]
57
58 return default
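# Illustrative (a sketch; the URL is hypothetical): given
#
#   SRC_URI = "git://example.com/linux-yocto;branch=standard/base,meta"
#
# the first branch of the first repository wins and get_machine_branch()
# returns "standard/base"; the default is only used when no branch= is set.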
59
60do_patch() {
61 cd ${S}
62 export KMETA=${KMETA}
63
64 # if kernel tools are available in-tree, they are preferred
65 # and are placed on the path before any external tools, unless
66 # the external tools flag is set, in which case we do nothing.
67 if [ -f "${S}/scripts/util/configme" ]; then
68 if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
69 PATH=${S}/scripts/util:${PATH}
70 fi
71 fi
72
73 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
74
75 # if we have a defined/set meta branch we should not be generating
76 # any meta data. The passed branch has what we need.
77 if [ -n "${KMETA}" ]; then
78 createme_flags="--disable-meta-gen --meta ${KMETA}"
79 fi
80
81 createme ${createme_flags} ${ARCH} ${machine_branch}
82 if [ $? -ne 0 ]; then
83 echo "ERROR. Could not create ${machine_branch}"
84 exit 1
85 fi
86
87 sccs="${@" ".join(find_sccs(d))}"
88 patches="${@" ".join(find_patches(d))}"
89 feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
90
91 set +e
92 # add any explicitly referenced features onto the end of the feature
93 # list that is passed to the kernel build scripts.
94 if [ -n "${KERNEL_FEATURES}" ]; then
95 for feat in ${KERNEL_FEATURES}; do
96 addon_features="$addon_features --feature $feat"
97 done
98 fi
99
100 # check for feature directories/repos/branches that were part of the
101 # SRC_URI. If they were supplied, we convert them into include directives
102 # for the update part of the process
103 if [ -n "${feat_dirs}" ]; then
104 for f in ${feat_dirs}; do
105 if [ -d "${WORKDIR}/$f/meta" ]; then
106 includes="$includes -I${WORKDIR}/$f/meta"
107 elif [ -d "${WORKDIR}/$f" ]; then
108 includes="$includes -I${WORKDIR}/$f"
109 fi
110 done
111 fi
112
113 if [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ]; then
114 updateme_flags="--branch ${machine_branch}"
115 fi
116
117 # updates or generates the target description
118 updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
119 ${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
120 if [ $? -ne 0 ]; then
121 echo "ERROR. Could not update ${machine_branch}"
122 exit 1
123 fi
124
125 # executes and modifies the source tree as required
126 patchme ${KMACHINE}
127 if [ $? -ne 0 ]; then
128 echo "ERROR. Could not apply patches for ${KMACHINE}."
129 echo " Patch failures can be resolved in the devshell (bitbake -c devshell ${PN})"
130 exit 1
131 fi
132
133 # Perform a final check. If something other than the default kernel
134 # branch was requested, and that's not where we ended up, then we
135 # should throw an error, since we aren't building what was expected
136 final_branch="$(git symbolic-ref HEAD 2>/dev/null)"
137 final_branch=${final_branch##refs/heads/}
138 if [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ] &&
139 [ "${final_branch}" != "${machine_branch}" ]; then
140 echo "ERROR: branch ${machine_branch} was requested, but was not properly"
141 echo " configured to be built. The current branch is ${final_branch}"
142 exit 1
143 fi
144}
145
146do_kernel_checkout() {
147 set +e
148
149 # A linux yocto SRC_URI should use the bareclone option. That
150 # ensures that all the branches are available in the WORKDIR version
151 # of the repository.
152 source_dir=`echo ${S} | sed 's%/$%%'`
153 source_workdir="${WORKDIR}/git"
154 if [ -d "${WORKDIR}/git/" ] && [ -d "${WORKDIR}/git/.git" ]; then
155 # case 1: the repository is a non-bare clone
156
157 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
158 if [ "${source_dir}" != "${source_workdir}" ]; then
159 rm -rf ${S}
160 mv ${WORKDIR}/git ${S}
161 fi
162 cd ${S}
163 elif [ -d "${WORKDIR}/git/" ] && [ ! -d "${WORKDIR}/git/.git" ]; then
164 # case 2: the repository is a bare clone
165
166 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
167 if [ "${source_dir}" != "${source_workdir}" ]; then
168 rm -rf ${S}
169 mkdir -p ${S}/.git
170 mv ${WORKDIR}/git/* ${S}/.git
171 rm -rf ${WORKDIR}/git/
172 fi
173 cd ${S}
174 git config core.bare false
175 else
176 # case 3: we have no git repository at all.
177 # To support low bandwidth options for building the kernel, we'll just
178 # convert the tree to a git repo and let the rest of the process work unchanged
179
180 # if ${S} hasn't been set to the proper subdirectory a default of "linux" is
181 # used, but we can't initialize that empty directory. So check it and throw a
182 # clear error
183
184 cd ${S}
185 if [ ! -f "Makefile" ]; then
186 echo "[ERROR]: S is not set to the linux source directory. Check "
187 echo " the recipe and set S to the proper extracted subdirectory"
188 exit 1
189 fi
190 git init
191 git add .
192 git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
193 fi
194 # end debare
195
196 # If KMETA is defined, the branch must exist, but a machine branch
197 # can be missing since it may be created later by the tools.
198 if [ -n "${KMETA}" ]; then
199 git branch -a | grep -q ${KMETA}
200 if [ $? -ne 0 ]; then
201 echo "ERROR. The branch '${KMETA}' is required and was not"
202 echo "found. Ensure that the SRC_URI points to a valid linux-yocto"
203 echo "kernel repository"
204 exit 1
205 fi
206 fi
207
208 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
209
210 if [ "${KBRANCH}" != "${machine_branch}" ]; then
211 echo "WARNING: The SRC_URI machine branch and KBRANCH are not the same."
212 echo " KBRANCH will be adjusted to match, but this typically is a"
213 echo " misconfiguration and should be checked."
214 fi
215
216 # convert any remote branches to local tracking ones
217 for i in `git branch -a | grep remotes | grep -v HEAD`; do
218 b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
219 git show-ref --quiet --verify -- "refs/heads/$b"
220 if [ $? -ne 0 ]; then
221 git branch $b $i > /dev/null
222 fi
223 done
224
225 # Create a working tree copy of the kernel by checking out a branch
226 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
227 if [ $? -eq 0 ]; then
228 # checkout and clobber any unimportant files
229 git checkout -f ${machine_branch}
230 else
231 echo "Not checking out ${machine_branch}, it will be created later"
232 git checkout -f master
233 fi
234}
235do_kernel_checkout[dirs] = "${S}"
236
237addtask kernel_checkout before do_patch after do_unpack
238
239do_kernel_configme[dirs] = "${S} ${B}"
240do_kernel_configme() {
241 echo "[INFO] doing kernel configme"
242 export KMETA=${KMETA}
243
244 if [ -n "${KCONFIG_MODE}" ]; then
245 configmeflags=${KCONFIG_MODE}
246 else
247 # If a defconfig was passed, use =n as the baseline, which is achieved
248 # via --allnoconfig
249 if [ -f ${WORKDIR}/defconfig ]; then
250 configmeflags="--allnoconfig"
251 fi
252 fi
253
254 cd ${S}
255 PATH=${PATH}:${S}/scripts/util
256 configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
257 if [ $? -ne 0 ]; then
258 echo "ERROR. Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
259 exit 1
260 fi
261
262 echo "# Global settings from linux recipe" >> ${B}/.config
263 echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
264}
265
266python do_kernel_configcheck() {
267 import re, string, sys
268
269 bb.plain("NOTE: validating kernel config, see log.do_kernel_configcheck for details")
270
271 # if KMETA isn't set globally by a recipe using this routine, we need to
272 # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
273 # meta-series for processing
274 kmeta = d.getVar( "KMETA", True ) or "meta"
275 if not os.path.exists(kmeta):
276 kmeta = "." + kmeta
277
278 pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
279 cmd = d.expand("cd ${S}; kconf_check -config- %s/meta-series ${S} ${B}" % kmeta)
280 ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
281
282 config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 1)
283 if config_check_visibility == 1:
284 bb.debug( 1, "%s" % result )
285 else:
286 bb.note( "%s" % result )
287}
288
289# Ensure that the branches (BSP and meta) are on the locations specified by
290# their SRCREV values. If they are NOT on the right commits, the branches
291# are corrected to the proper commit.
292do_validate_branches() {
293 cd ${S}
294 export KMETA=${KMETA}
295
296 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
297
298 set +e
299 # if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to
300 # check and we can exit early
301 if [ "${SRCREV_machine}" = "AUTOINC" ] || [ "${SRCREV_machine}" = "INVALID" ] ||
302 [ "${SRCREV_machine}" = "" ]; then
303 return
304 fi
305
306 # If something other than the default branch was requested, it must
307 # exist in the tree; it's a hard error if it doesn't
308 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
309 if [ $? -eq 1 ]; then
310 if [ -n "${KBRANCH_DEFAULT}" ] &&
311 [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ]; then
312 echo "ERROR: branch ${machine_branch} was set for kernel compilation, "
313 echo " but it does not exist in the kernel repository."
314 echo " Check the value of KBRANCH and ensure that it describes"
315 echo " a valid banch in the source kernel repository"
316 exit 1
317 fi
318 fi
319
320 if [ -z "${SRCREV_machine}" ]; then
321 target_branch_head="${SRCREV}"
322 else
323 target_branch_head="${SRCREV_machine}"
324 fi
325
326 # $SRCREV could have also been AUTOINC, so check again
327 if [ "${target_branch_head}" = "AUTOINC" ]; then
328 return
329 fi
330
331 ref=`git show ${target_branch_head} 2>&1 | head -n1 || true`
332 if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
333 echo "ERROR ${target_branch_head} is not a valid commit ID."
334 echo "The kernel source tree may be out of sync"
335 exit 1
336 fi
337
338 containing_branches=`git branch --contains $target_branch_head | sed 's/^..//'`
339 if [ -z "$containing_branches" ]; then
340 echo "ERROR: SRCREV was set to \"$target_branch_head\", but no branches"
341 echo " contain this commit"
342 exit 1
343 fi
344
345 # force the SRCREV in each branch that contains the specified
346 # SRCREV (if it isn't the current HEAD of that branch)
347 git checkout -q master
348 for b in $containing_branches; do
349 branch_head=`git show-ref -s --heads ${b}`
350 if [ "$branch_head" != "$target_branch_head" ]; then
351 echo "[INFO] Setting branch $b to ${target_branch_head}"
352 if [ "$b" = "master" ]; then
353 git reset --hard $target_branch_head > /dev/null
354 else
355 git branch -D $b > /dev/null
356 git branch $b $target_branch_head > /dev/null
357 fi
358 fi
359 done
360
361 ## KMETA branch validation
362 meta_head=`git show-ref -s --heads ${KMETA}`
363 target_meta_head="${SRCREV_meta}"
364 git show-ref --quiet --verify -- "refs/heads/${KMETA}"
365 if [ $? -eq 1 ]; then
366 return
367 fi
368
369 if [ "${target_meta_head}" = "AUTOINC" ]; then
370 return
371 fi
372
373 if [ "$meta_head" != "$target_meta_head" ]; then
374 ref=`git show ${target_meta_head} 2>&1 | head -n1 || true`
375 if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
376 echo "ERROR ${target_meta_head} is not a valid commit ID"
377 echo "The kernel source tree may be out of sync"
378 exit 1
379 else
380 echo "[INFO] Setting branch ${KMETA} to ${target_meta_head}"
381 git branch -m ${KMETA} ${KMETA}-orig
382 git checkout -q -b ${KMETA} ${target_meta_head}
383 if [ $? -ne 0 ]; then
384 echo "ERROR: could not checkout ${KMETA} branch from known hash ${target_meta_head}"
385 exit 1
386 fi
387 fi
388 fi
389
390 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
391 if [ $? -eq 0 ]; then
392 # restore the branch for builds
393 git checkout -q -f ${machine_branch}
394 else
395 git checkout -q master
396 fi
397}
398
399# Many scripts want to look in arch/$arch/boot for the bootable
400# image. This poses a problem for vmlinux based booting. This
401# task arranges to have vmlinux appear in the normalized directory
402# location.
403do_kernel_link_vmlinux() {
404 if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
405 mkdir ${B}/arch/${ARCH}/boot
406 fi
407 cd ${B}/arch/${ARCH}/boot
408 ln -sf ../../../vmlinux
409}
410
411OE_TERMINAL_EXPORTS += "GUILT_BASE KBUILD_OUTPUT"
412GUILT_BASE = "meta"
413KBUILD_OUTPUT = "${B}"
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
new file mode 100644
index 0000000000..f40ea8985d
--- /dev/null
+++ b/meta/classes/kernel.bbclass
@@ -0,0 +1,474 @@
1inherit linux-kernel-base module_strip kernel-module-split
2
3PROVIDES += "virtual/kernel"
4DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross"
5
6# we include gcc above, we don't need virtual/libc
7INHIBIT_DEFAULT_DEPS = "1"
8
9KERNEL_IMAGETYPE ?= "zImage"
10INITRAMFS_IMAGE ?= ""
11INITRAMFS_TASK ?= ""
12INITRAMFS_IMAGE_BUNDLE ?= ""
13
14python __anonymous () {
15 kerneltype = d.getVar('KERNEL_IMAGETYPE', True) or ''
16 if kerneltype == 'uImage':
17 depends = d.getVar("DEPENDS", True)
18 depends = "%s u-boot-mkimage-native" % depends
19 d.setVar("DEPENDS", depends)
20
21 image = d.getVar('INITRAMFS_IMAGE', True)
22 if image:
23 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_rootfs')
24
25 # NOTE: setting INITRAMFS_TASK is for backward compatibility
26 # The preferred method is to set INITRAMFS_IMAGE, because
27 # this INITRAMFS_TASK has circular dependency problems
28 # if the initramfs requires kernel modules
29 image_task = d.getVar('INITRAMFS_TASK', True)
30 if image_task:
31 d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
32}
33
34inherit kernel-arch deploy
35
36PACKAGES_DYNAMIC += "^kernel-module-.*"
37PACKAGES_DYNAMIC += "^kernel-image-.*"
38PACKAGES_DYNAMIC += "^kernel-firmware-.*"
39
40export OS = "${TARGET_OS}"
41export CROSS_COMPILE = "${TARGET_PREFIX}"
42
43KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
44 int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
45 int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}"
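# e.g. (illustrative): PV = "3.14.26" gives 3*10000 + 14*100 + 26 = 31426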
46
47KERNEL_RELEASE ?= "${KERNEL_VERSION}"
48
49# Where built kernel lies in the kernel tree
50KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
51KERNEL_IMAGEDEST = "boot"
52
53#
54# configuration
55#
56export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
57
58KERNEL_VERSION = "${@get_kernelversion('${B}')}"
59
60KERNEL_LOCALVERSION ?= ""
61
62# kernels are generally machine specific
63PACKAGE_ARCH = "${MACHINE_ARCH}"
64
65# U-Boot support
66UBOOT_ENTRYPOINT ?= "20008000"
67UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
68
69# Some Linux kernel configurations need additional parameters on the command line
70KERNEL_EXTRA_ARGS ?= ""
71
72# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
73# We don't want to override kernel Makefile variables from the environment
74EXTRA_OEMAKE = ""
75
76KERNEL_ALT_IMAGETYPE ??= ""
77
78# Define where the kernel headers are installed on the target as well as where
79# they are staged.
80KERNEL_SRC_PATH = "/usr/src/kernel"
81
82KERNEL_IMAGETYPE_FOR_MAKE = "${@(lambda s: s[:-3] if s[-3:] == ".gz" else s)(d.getVar('KERNEL_IMAGETYPE', True))}"
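# e.g. (illustrative): KERNEL_IMAGETYPE = "uImage.gz" makes "uImage" the make
# target, while plain "zImage" passes through unchanged; kernel_do_compile()
# below re-gzips the result when the ".gz" suffix was stripped.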
83
84copy_initramfs() {
85 echo "Copying initramfs into ./usr ..."
86 # In case the directory is not created yet from the first pass compile:
87 mkdir -p ${B}/usr
88 # Find and use the first initramfs image archive type we find
89 rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
90 for img in cpio.gz cpio.lzo cpio.lzma cpio.xz; do
91 if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
92 cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/.
93 case $img in
94 *gz)
95 echo "gzip decompressing image"
96 gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
97 break
98 ;;
99 *lzo)
100 echo "lzo decompressing image"
101 lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
102 break
103 ;;
104 *lzma)
105 echo "lzma decompressing image"
106 lzmash -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
107 break
108 ;;
109 *xz)
110 echo "xz decompressing image"
111 xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
112 break
113 ;;
114 esac
115 fi
116 done
117 echo "Finished copy of initramfs into ./usr"
118}
119
120INITRAMFS_BASE_NAME = "${KERNEL_IMAGETYPE}-initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
121INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
122do_bundle_initramfs () {
123 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
124 echo "Creating a kernel image with a bundled initramfs..."
125 copy_initramfs
126 if [ -e ${KERNEL_OUTPUT} ] ; then
127 mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.bak
128 fi
129 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
130 kernel_do_compile
131 mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.initramfs
132 mv -f ${KERNEL_OUTPUT}.bak ${KERNEL_OUTPUT}
133 # Update install area
134 echo "There is kernel image bundled with initramfs: ${B}/${KERNEL_OUTPUT}.initramfs"
135 install -m 0644 ${B}/${KERNEL_OUTPUT}.initramfs ${D}/boot/${KERNEL_IMAGETYPE}-initramfs-${MACHINE}.bin
136 echo "${B}/${KERNEL_OUTPUT}.initramfs"
137 cd ${B}
138 # Update deploy directory
139 if [ -e "${KERNEL_OUTPUT}.initramfs" ]; then
140 echo "Copying deploy kernel-initramfs image and setting up links..."
141 initramfs_base_name=${INITRAMFS_BASE_NAME}
142 initramfs_symlink_name=${KERNEL_IMAGETYPE}-initramfs-${MACHINE}
143 install -m 0644 ${KERNEL_OUTPUT}.initramfs ${DEPLOY_DIR_IMAGE}/${initramfs_base_name}.bin
144 cd ${DEPLOY_DIR_IMAGE}
145 ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin
146 fi
147 fi
148}
149do_bundle_initramfs[nostamp] = "1"
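# A minimal sketch of enabling the bundling above, e.g. from local.conf
# (the initramfs image name is an example; any initramfs image recipe works):
#   INITRAMFS_IMAGE = "core-image-minimal-initramfs"
#   INITRAMFS_IMAGE_BUNDLE = "1"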
150
151python do_devshell_prepend () {
152 os.environ["LDFLAGS"] = ''
153}
154
155addtask bundle_initramfs after do_compile
156
157kernel_do_compile() {
158 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
159	# $use_alternate_initrd is only set from
160	# do_bundle_initramfs(). This variable is specifically for the
161 # case where we are making a second pass at the kernel
162 # compilation and we want to force the kernel build to use a
163 # different initramfs image. The way to do that in the kernel
164 # is to specify:
165 # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
166 if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
167		# The old style way of copying a prebuilt image and building it
168		# is turned on via INITRAMFS_TASK != ""
169 copy_initramfs
170 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
171 fi
172 oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
173 if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then
174 gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}"
175 fi
176}
177
178do_compile_kernelmodules() {
179 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
180 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
181 oe_runmake ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
182 else
183 bbnote "no modules to compile"
184 fi
185}
186addtask compile_kernelmodules after do_compile before do_strip
187
188kernel_do_install() {
189 #
190 # First install the modules
191 #
192 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
193 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
194 oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
195 rm "${D}/lib/modules/${KERNEL_VERSION}/build"
196 rm "${D}/lib/modules/${KERNEL_VERSION}/source"
197 else
198 bbnote "no modules to install"
199 fi
200
201 #
202 # Install various kernel output (zImage, map file, config, module support files)
203 #
204 install -d ${D}/${KERNEL_IMAGEDEST}
205 install -d ${D}/boot
206 install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}
207 install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
208 install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
209 install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
210 [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
211 install -d ${D}${sysconfdir}/modules-load.d
212 install -d ${D}${sysconfdir}/modprobe.d
213
214 #
215 # Support for external module building - create a minimal copy of the
216 # kernel source tree.
217 #
218 kerneldir=${D}${KERNEL_SRC_PATH}
219 install -d $kerneldir
220
221 #
222 # Store the kernel version in sysroots for module-base.bbclass
223 #
224
225 echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
226
227 #
228 # Store kernel image name to allow use during image generation
229 #
230
231 echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
232
233 #
234 # Copy the entire source tree. In case an external build directory is
235 # used, copy the build directory over first, then copy over the source
236 # dir. This ensures the original Makefiles are used and not the
237 # redirecting Makefiles in the build directory.
238 #
239 # work and sysroots can be on different partitions, so we can't rely on
240 # hardlinking, unfortunately.
241 #
242 find . -depth -not -name "*.cmd" -not -name "*.o" -not -path "./.*" -print0 | cpio --null -pdu $kerneldir
243 cp .config $kerneldir
244 if [ "${S}" != "${B}" ]; then
245 pwd="$PWD"
246 cd "${S}"
247 find . -depth -not -path "./.*" -print0 | cpio --null -pdu $kerneldir
248 cd "$pwd"
249 fi
250 install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
251 install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
252
253 #
254 # Clean and remove files not needed for building modules.
255 # Some distributions go through a lot more trouble to strip out
256	# unnecessary headers; for now, we just prune the obvious bits.
257 #
258 # We don't want to leave host-arch binaries in /sysroots, so
259 # we clean the scripts dir while leaving the generated config
260 # and include files.
261 #
262 oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean
263 make -C $kerneldir _mrproper_scripts
264 find $kerneldir -path $kerneldir/lib -prune -o -path $kerneldir/tools -prune -o -path $kerneldir/scripts -prune -o -name "*.[csS]" -exec rm '{}' \;
265 find $kerneldir/Documentation -name "*.txt" -exec rm '{}' \;
266
267 # As of Linux kernel version 3.0.1, the clean target removes
268	# arch/powerpc/lib/crtsavres.o, which is listed in
269	# KBUILD_LDFLAGS_MODULE and hence required for building external modules.
270 if [ ${ARCH} = "powerpc" ]; then
271 cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
272 fi
273
274 # Necessary for building modules like compat-wireless.
275 if [ -f include/generated/bounds.h ]; then
276 cp include/generated/bounds.h $kerneldir/include/generated/bounds.h
277 fi
278 if [ -d arch/${ARCH}/include/generated ]; then
279 mkdir -p $kerneldir/arch/${ARCH}/include/generated/
280 cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
281 fi
282
283 # Remove the following binaries which cause strip or arch QA errors
284 # during do_package for cross-compiled platforms
285 bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
286 arch/powerpc/boot/mktree scripts/kconfig/zconf.tab.o \
287 scripts/kconfig/conf.o scripts/kconfig/kxgettext.o"
288 for entry in $bin_files; do
289 rm -f $kerneldir/$entry
290 done
291
292	# Kernels < 2.6.30 don't have a $kerneldir/tools directory, so check that it exists before calling sed
293 if [ -f $kerneldir/tools/perf/Makefile ]; then
294 # Fix SLANG_INC for slang.h
295 sed -i 's#-I/usr/include/slang#-I=/usr/include/slang#g' $kerneldir/tools/perf/Makefile
296 fi
297}
298do_install[prefuncs] += "package_get_auto_pr"
299
300sysroot_stage_all_append() {
301 sysroot_stage_dir ${D}${KERNEL_SRC_PATH} ${SYSROOT_DESTDIR}${KERNEL_SRC_PATH}
302}
303
304kernel_do_configure() {
305 # fixes extra + in /lib/modules/2.6.37+
306 # $ scripts/setlocalversion . => +
307 # $ make kernelversion => 2.6.37
308 # $ make kernelrelease => 2.6.37+
309 touch ${B}/.scmversion ${S}/.scmversion
310
311 # Copy defconfig to .config if .config does not exist. This allows
312 # recipes to manage the .config themselves in do_configure_prepend().
313 if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
314 cp "${WORKDIR}/defconfig" "${B}/.config"
315 fi
316 yes '' | oe_runmake oldconfig
317}
318
319do_savedefconfig() {
320 oe_runmake savedefconfig
321}
322do_savedefconfig[nostamp] = "1"
323addtask savedefconfig after do_configure
324
325inherit cml1
326
327EXPORT_FUNCTIONS do_compile do_install do_configure
328
329# kernel-base becomes kernel-${KERNEL_VERSION}
330# kernel-image becomes kernel-image-${KERNEL_VERSION}
331PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
332FILES_${PN} = ""
333FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
334FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
335FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH}"
336FILES_kernel-vmlinux = "/boot/vmlinux*"
337FILES_kernel-modules = ""
338RDEPENDS_kernel = "kernel-base"
339# Allow machines to override this dependency if kernel image files are
340# not wanted in images as standard
341RDEPENDS_kernel-base ?= "kernel-image"
342PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
343PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
344RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}"
345ALLOW_EMPTY_kernel = "1"
346ALLOW_EMPTY_kernel-base = "1"
347ALLOW_EMPTY_kernel-image = "1"
348ALLOW_EMPTY_kernel-modules = "1"
349DESCRIPTION_kernel-modules = "Kernel modules meta package"
350
351pkg_postinst_kernel-image () {
352 update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
353 if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
354 mkdir -p $D/lib/modules/${KERNEL_VERSION}
355 fi
356 if [ -n "$D" ]; then
357 depmodwrapper -a -b $D ${KERNEL_VERSION}
358 else
359 depmod -a ${KERNEL_VERSION}
360 fi
361}
362
363pkg_postrm_kernel-image () {
364 update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true
365}
366
367PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
368
369python split_kernel_packages () {
370 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.bin$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
371 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.fw$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
372 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.cis$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
373}
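# Illustrative note: with the splitting above, a firmware file such as
# /lib/firmware/example.fw (a hypothetical name) ends up in its own
# package named kernel-firmware-example.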
374
375do_strip() {
376 if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
377		if [ "${KERNEL_IMAGETYPE}" != "vmlinux" ]; then
378 bbwarn "image type will not be stripped (not supported): ${KERNEL_IMAGETYPE}"
379 return
380 fi
381
382 cd ${B}
383 headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT} | \
384 grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
385 sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
386 gawk '{print $1}'`
387
388		for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do
389			case "$headers" in
390			*"$str"*) ;;
391			*) bbwarn "Section not found: $str" ;;
392			esac
393			"$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT}
394		done
395
396 bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
397 "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
398	fi
399}
400do_strip[dirs] = "${B}"
401
402addtask do_strip before do_sizecheck after do_kernel_link_vmlinux
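# A minimal sketch of using the strip support above, e.g. from a machine
# configuration (the section name is an example); this only takes effect
# when KERNEL_IMAGETYPE is "vmlinux":
#   KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment"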
403
404	# Support checking the kernel size, since some kernels need to reside in
405	# partitions of a fixed length, or there is a limit on transferring the kernel to memory
406do_sizecheck() {
407 if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
408 cd ${B}
409 size=`ls -lL ${KERNEL_OUTPUT} | awk '{ print $5}'`
410 if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
411 die "This kernel (size=$size > ${KERNEL_IMAGE_MAXSIZE}) is too big for your device. Please reduce the size of the kernel by making more of it modular."
412 fi
413 fi
414}
415do_sizecheck[dirs] = "${B}"
416
417addtask sizecheck before do_install after do_strip
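# A minimal sketch of enabling the size check above, e.g. in a machine
# configuration for a device with a 4 MB kernel partition (the value is an
# example, in bytes, matching the ls -l based check above):
#   KERNEL_IMAGE_MAXSIZE = "4194304"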
418
419KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
420# Don't include the DATETIME variable in the sstate package signatures
421KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
422KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}"
423MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
424MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
425MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz"
426# Don't include the DATETIME variable in the sstate package signatures
427MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
428MODULE_TARBALL_DEPLOY ?= "1"
429
430do_uboot_mkimage() {
431 if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
432 if test "x${KEEPUIMAGE}" != "xyes" ; then
433 ENTRYPOINT=${UBOOT_ENTRYPOINT}
434 if test -n "${UBOOT_ENTRYSYMBOL}"; then
435 ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
436 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
437 fi
438 if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
439 ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
440 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C none -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
441 rm -f linux.bin
442 else
443 ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
444 rm -f linux.bin.gz
445 gzip -9 linux.bin
446 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C gzip -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz arch/${ARCH}/boot/uImage
447 rm -f linux.bin.gz
448 fi
449 fi
450 fi
451}
452
453addtask uboot_mkimage before do_install after do_compile
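# A minimal sketch of the machine-side settings consumed by the task
# above (all values are examples):
#   KERNEL_IMAGETYPE = "uImage"
#   UBOOT_ARCH = "arm"
#   UBOOT_ENTRYPOINT = "80008000"
# Alternatively, KEEPUIMAGE = "yes" keeps the uImage produced by the
# kernel's own build.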
454
455kernel_do_deploy() {
456 install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
457 if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
458 mkdir -p ${D}/lib
459 tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
460 ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME}
461 fi
462
463 ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGE_SYMLINK_NAME}.bin
464 ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGETYPE}
465
466 cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
467}
468do_deploy[dirs] = "${DEPLOYDIR} ${B}"
469do_deploy[prefuncs] += "package_get_auto_pr"
470
471addtask deploy before do_build after do_install
472
473EXPORT_FUNCTIONS do_deploy
474
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
new file mode 100644
index 0000000000..8849f59042
--- /dev/null
+++ b/meta/classes/lib_package.bbclass
@@ -0,0 +1,7 @@
1#
2# ${PN}-bin is defined in bitbake.conf
3#
4# We need to allow the other packages to be greedy with what they
5# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
6#
7PACKAGE_BEFORE_PN = "${PN}-bin"
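# A minimal sketch of using this class (the recipe name is an example): a
# library recipe that also ships helper programs just adds
#   inherit lib_package
# and any files left in ${bindir}/${sbindir} are then packaged into
# ${PN}-bin rather than being claimed by ${PN}.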
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
new file mode 100644
index 0000000000..67b018b753
--- /dev/null
+++ b/meta/classes/libc-common.bbclass
@@ -0,0 +1,36 @@
1do_install() {
2 oe_runmake install_root=${D} install
3 for r in ${rpcsvc}; do
4 h=`echo $r|sed -e's,\.x$,.h,'`
5 install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
6 done
7 install -d ${D}/${sysconfdir}/
8 install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
9 install -d ${D}${localedir}
10 make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
11 # get rid of some broken files...
12 for i in ${GLIBC_BROKEN_LOCALES}; do
13 grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
14 mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
15 done
16 rm -f ${D}${sysconfdir}/rpc
17 rm -rf ${D}${datadir}/zoneinfo
18 rm -rf ${D}${libexecdir}/getconf
19}
20
21def get_libc_fpu_setting(bb, d):
22 if d.getVar('TARGET_FPU', True) in [ 'soft' ]:
23 return "--without-fp"
24 return ""
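# A minimal sketch of how the helper above is typically consumed from a
# libc recipe (illustrative; the exact variable may differ per recipe):
#   EXTRA_OECONF += "${@get_libc_fpu_setting(bb, d)}"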
25
26python populate_packages_prepend () {
27 if d.getVar('DEBIAN_NAMES', True):
28 bpn = d.getVar('BPN', True)
29 d.setVar('PKG_'+bpn, 'libc6')
30 d.setVar('PKG_'+bpn+'-dev', 'libc6-dev')
31 d.setVar('PKG_'+bpn+'-dbg', 'libc6-dbg')
32 # For backward compatibility with old -dbg package
33 d.appendVar('RPROVIDES_' + bpn + '-dbg', ' libc-dbg')
34 d.appendVar('RCONFLICTS_' + bpn + '-dbg', ' libc-dbg')
35 d.appendVar('RREPLACES_' + bpn + '-dbg', ' libc-dbg')
36}
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
new file mode 100644
index 0000000000..40c3138d5b
--- /dev/null
+++ b/meta/classes/libc-package.bbclass
@@ -0,0 +1,389 @@
1#
2# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
3# may need packaging and it's pointless to duplicate this code.
4#
5# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
6# "compile" - Use QEMU to generate the binary locale files
7# "precompiled" - The binary locale files are pregenerated and already present
8# "ondevice" - The device will build the locale files upon first boot through the postinst
9
10GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
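# A minimal sketch of a distro configuration driving the logic below
# (all values are examples):
#   ENABLE_BINARY_LOCALE_GENERATION = "1"
#   BINARY_LOCALE_ARCHES = "arm.* i[3-6]86 x86_64 powerpc mips"
#   GLIBC_GENERATE_LOCALES = "en_US.UTF-8 de_DE.UTF-8"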
11
12python __anonymous () {
13 enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
14
15 pn = d.getVar("PN", True)
16 if pn.endswith("-initial"):
17 enabled = False
18
19 if enabled and int(enabled):
20 import re
21
22 target_arch = d.getVar("TARGET_ARCH", True)
23 binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
24 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
25
26 for regexp in binary_arches.split(" "):
27 r = re.compile(regexp)
28
29 if r.match(target_arch):
30 depends = d.getVar("DEPENDS", True)
31 if use_cross_localedef == "1" :
32 depends = "%s cross-localedef-native" % depends
33 else:
34 depends = "%s qemu-native" % depends
35 d.setVar("DEPENDS", depends)
36 d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
37 break
38
39 distro_features = (d.getVar('DISTRO_FEATURES', True) or '').split()
40
41    # Try to fix the compile failure seen when charsets/locales/locale-code are disabled
42 if 'libc-charsets' in distro_features and 'libc-locales' in distro_features and 'libc-locale-code' in distro_features:
43 d.setVar('PACKAGE_NO_GCONV', '0')
44 else:
45 d.setVar('PACKAGE_NO_GCONV', '1')
46}
47
48OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
49
50do_configure_prepend() {
51 if [ -e ${S}/elf/ldd.bash.in ]; then
52 sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
53 fi
54}
55
56
57
58# indentation removed on purpose
59locale_base_postinst() {
60#!/bin/sh
61
62if [ "x$D" != "x" ]; then
63 exit 1
64fi
65
66rm -rf ${TMP_LOCALE}
67mkdir -p ${TMP_LOCALE}
68if [ -f ${localedir}/locale-archive ]; then
69 cp ${localedir}/locale-archive ${TMP_LOCALE}/
70fi
71localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
72mkdir -p ${localedir}/
73mv ${TMP_LOCALE}/locale-archive ${localedir}/
74rm -rf ${TMP_LOCALE}
75}
76
77# indentation removed on purpose
78locale_base_postrm() {
79#!/bin/sh
80
81rm -rf ${TMP_LOCALE}
82mkdir -p ${TMP_LOCALE}
83if [ -f ${localedir}/locale-archive ]; then
84 cp ${localedir}/locale-archive ${TMP_LOCALE}/
85fi
86localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
87mv ${TMP_LOCALE}/locale-archive ${localedir}/
88rm -rf ${TMP_LOCALE}
89}
90
91
92TMP_LOCALE="/tmp/locale${localedir}"
93LOCALETREESRC ?= "${PKGD}"
94
95do_prep_locale_tree() {
96 treedir=${WORKDIR}/locale-tree
97 rm -rf $treedir
98 mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
99 tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
100	# gunzip the charmaps to avoid parsing errors
101 for i in $treedir/${datadir}/i18n/charmaps/*gz; do
102 gunzip $i
103 done
104 tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
105 if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
106 tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
107 fi
108 install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
109}
110
111do_collect_bins_from_locale_tree() {
112 treedir=${WORKDIR}/locale-tree
113
114 parent=$(dirname ${localedir})
115 mkdir -p ${PKGD}/$parent
116 tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
117}
118
119inherit qemu
120
121python package_do_split_gconvs () {
122 import re
123 if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
124 bb.note("package requested not splitting gconvs")
125 return
126
127 if not d.getVar('PACKAGES', True):
128 return
129
130 mlprefix = d.getVar("MLPREFIX", True) or ""
131
132 bpn = d.getVar('BPN', True)
133 libdir = d.getVar('libdir', True)
134 if not libdir:
135 bb.error("libdir not defined")
136 return
137 datadir = d.getVar('datadir', True)
138 if not datadir:
139 bb.error("datadir not defined")
140 return
141
142 gconv_libdir = base_path_join(libdir, "gconv")
143 charmap_dir = base_path_join(datadir, "i18n", "charmaps")
144 locales_dir = base_path_join(datadir, "i18n", "locales")
145 binary_locales_dir = d.getVar('localedir', True)
146
147 def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
148 deps = []
149 f = open(fn, "rb")
150 c_re = re.compile('^copy "(.*)"')
151 i_re = re.compile('^include "(\w+)".*')
152 for l in f.readlines():
153 m = c_re.match(l) or i_re.match(l)
154 if m:
155 dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
156 if not dp in deps:
157 deps.append(dp)
158 f.close()
159 if deps != []:
160 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
161 if bpn != 'glibc':
162 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
163
164 do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
165 description='gconv module for character set %s', hook=calc_gconv_deps, \
166 extra_depends=bpn+'-gconv')
167
168 def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
169 deps = []
170 f = open(fn, "rb")
171 c_re = re.compile('^copy "(.*)"')
172 i_re = re.compile('^include "(\w+)".*')
173 for l in f.readlines():
174 m = c_re.match(l) or i_re.match(l)
175 if m:
176 dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
177 if not dp in deps:
178 deps.append(dp)
179 f.close()
180 if deps != []:
181 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
182 if bpn != 'glibc':
183 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
184
185 do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
186 description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
187
188 def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
189 deps = []
190 f = open(fn, "rb")
191 c_re = re.compile('^copy "(.*)"')
192 i_re = re.compile('^include "(\w+)".*')
193 for l in f.readlines():
194 m = c_re.match(l) or i_re.match(l)
195 if m:
196 dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
197 if not dp in deps:
198 deps.append(dp)
199 f.close()
200 if deps != []:
201 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
202 if bpn != 'glibc':
203 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
204
205 do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
206 description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
207 d.setVar('PACKAGES', d.getVar('PACKAGES') + ' ' + d.getVar('MLPREFIX') + bpn + '-gconv')
208
209 use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
210
211 dot_re = re.compile("(.*)\.(.*)")
212
213 # Read in supported locales and associated encodings
214 supported = {}
215 with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
216 for line in f.readlines():
217 try:
218 locale, charset = line.rstrip().split()
219 except ValueError:
220 continue
221 supported[locale] = charset
222
223    # The GLIBC_GENERATE_LOCALES variable specifies which locales are to be generated. Empty or "all" means all locales.
224 to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
225 if not to_generate or to_generate == 'all':
226 to_generate = supported.keys()
227 else:
228 to_generate = to_generate.split()
229 for locale in to_generate:
230 if locale not in supported:
231 if '.' in locale:
232 charset = locale.split('.')[1]
233 else:
234 charset = 'UTF-8'
235 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
236 supported[locale] = charset
237
238 def output_locale_source(name, pkgname, locale, encoding):
239 d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
240 (mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
241 d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
242 % (locale, encoding, locale))
243 d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
244 (locale, encoding, locale))
245
246 def output_locale_binary_rdepends(name, pkgname, locale, encoding):
247 m = re.match("(.*)\.(.*)", name)
248 if m:
249 libc_name = "%s.%s" % (m.group(1), m.group(2).lower())
250 else:
251 libc_name = name
252 d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
253 % (mlprefix+bpn, libc_name)))
254
255 commands = {}
256
257 def output_locale_binary(name, pkgname, locale, encoding):
258 treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
259 ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
260 path = d.getVar("PATH", True)
261 i18npath = base_path_join(treedir, datadir, "i18n")
262 gconvpath = base_path_join(treedir, "iconvdata")
263 outputpath = base_path_join(treedir, binary_locales_dir)
264
265 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
266 if use_cross_localedef == "1":
267 target_arch = d.getVar('TARGET_ARCH', True)
268 locale_arch_options = { \
269 "arm": " --uint32-align=4 --little-endian ", \
270 "armeb": " --uint32-align=4 --big-endian ", \
271 "sh4": " --uint32-align=4 --big-endian ", \
272 "powerpc": " --uint32-align=4 --big-endian ", \
273 "powerpc64": " --uint32-align=4 --big-endian ", \
274 "mips": " --uint32-align=4 --big-endian ", \
275 "mips64": " --uint32-align=4 --big-endian ", \
276 "mipsel": " --uint32-align=4 --little-endian ", \
277 "mips64el":" --uint32-align=4 --little-endian ", \
278 "i586": " --uint32-align=4 --little-endian ", \
279 "i686": " --uint32-align=4 --little-endian ", \
280 "x86_64": " --uint32-align=4 --little-endian " }
281
282 if target_arch in locale_arch_options:
283 localedef_opts = locale_arch_options[target_arch]
284 else:
285 bb.error("locale_arch_options not found for target_arch=" + target_arch)
286 raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
287
288 localedef_opts += " --force --old-style --no-archive --prefix=%s \
289 --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
290 % (treedir, treedir, datadir, locale, encoding, outputpath, name)
291
292 cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
293 (path, i18npath, gconvpath, localedef_opts)
294        else: # older, slower method using QEMU
295 qemu = qemu_target_binary(d)
296 localedef_opts = "--force --old-style --no-archive --prefix=%s \
297 --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
298 % (treedir, datadir, locale, encoding, name)
299
300 qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
301 if not qemu_options:
302 qemu_options = d.getVar('QEMU_OPTIONS', True)
303
304 cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
305 -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
306 (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
307
308 commands["%s/%s" % (outputpath, name)] = cmd
309
310 bb.note("generating locale %s (%s)" % (locale, encoding))
311
312 def output_locale(name, locale, encoding):
313 pkgname = d.getVar('MLPREFIX') + 'locale-base-' + legitimize_package_name(name)
314 d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
315 d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
316 rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
317 m = re.match("(.*)_(.*)", name)
318 if m:
319 rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
320 d.setVar('RPROVIDES_%s' % pkgname, rprovides)
321
322 if use_bin == "compile":
323 output_locale_binary_rdepends(name, pkgname, locale, encoding)
324 output_locale_binary(name, pkgname, locale, encoding)
325 elif use_bin == "precompiled":
326 output_locale_binary_rdepends(name, pkgname, locale, encoding)
327 else:
328 output_locale_source(name, pkgname, locale, encoding)
329
330 if use_bin == "compile":
331 bb.note("preparing tree for binary locale generation")
332 bb.build.exec_func("do_prep_locale_tree", d)
333
334 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
335 encodings = {}
336 for locale in to_generate:
337 charset = supported[locale]
338 if utf8_only and charset != 'UTF-8':
339 continue
340
341 m = dot_re.match(locale)
342 if m:
343 base = m.group(1)
344 else:
345 base = locale
346
347 # Precompiled locales are kept as is, obeying SUPPORTED, while
348 # others are adjusted, ensuring that the non-suffixed locales
349 # are utf-8, while the suffixed are not.
350 if use_bin == "precompiled":
351 output_locale(locale, base, charset)
352 else:
353 if charset == 'UTF-8':
354 output_locale(base, base, charset)
355 else:
356 output_locale('%s.%s' % (base, charset), base, charset)
357
358 if use_bin == "compile":
359 makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
360 m = open(makefile, "w")
361 m.write("all: %s\n\n" % " ".join(commands.keys()))
362 for cmd in commands:
363 m.write(cmd + ":\n")
364 m.write("\t" + commands[cmd] + "\n\n")
365 m.close()
366 d.setVar("B", os.path.dirname(makefile))
367 d.setVar("EXTRA_OEMAKE", "${PARALLEL_MAKE}")
368 bb.note("Executing binary locale generation makefile")
369 bb.build.exec_func("oe_runmake", d)
370 bb.note("collecting binary locales from locale tree")
371 bb.build.exec_func("do_collect_bins_from_locale_tree", d)
372 do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
373 output_pattern=bpn+'-binary-localedata-%s', \
374 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
375 elif use_bin == "precompiled":
376 do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
377 output_pattern=bpn+'-binary-localedata-%s', \
378 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
379 else:
380 bb.note("generation of binary locales disabled. this may break i18n!")
381
382}
383
384# We want to do this indirection so that we can safely 'return'
385# from the called function even though we're prepending
386python populate_packages_prepend () {
387 bb.build.exec_func('package_do_split_gconvs', d)
388}
389
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
new file mode 100644
index 0000000000..6abdae4e84
--- /dev/null
+++ b/meta/classes/license.bbclass
@@ -0,0 +1,359 @@
1# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
2# LIC_FILES_CHKSUM.
3# TODO:
4# - There is a real issue revolving around license naming standards.
5
6LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
7LICSSTATEDIR = "${WORKDIR}/license-destdir/"
8
9# Create extra package with license texts and add it to RRECOMMENDS_${PN}
10LICENSE_CREATE_PACKAGE[type] = "boolean"
11LICENSE_CREATE_PACKAGE ??= "0"
12LICENSE_PACKAGE_SUFFIX ??= "-lic"
13LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
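# A minimal sketch of enabling the license package support above, e.g.
# from a distro or local configuration:
#   LICENSE_CREATE_PACKAGE = "1"
# Each target recipe then emits a ${PN}-lic package carrying its license
# texts under ${datadir}/licenses/${PN}.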
14
15addtask populate_lic after do_patch before do_build
16do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
17do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
18
19license_create_manifest() {
20 mkdir -p ${LICENSE_DIRECTORY}/${IMAGE_NAME}
21 # Get list of installed packages
22 list_installed_packages |sort > ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest
23 INSTALLED_PKGS=`cat ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest`
24 LICENSE_MANIFEST="${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest"
25 # remove existing license.manifest file
26 if [ -f ${LICENSE_MANIFEST} ]; then
27 rm ${LICENSE_MANIFEST}
28 fi
29 touch ${LICENSE_MANIFEST}
30 for pkg in ${INSTALLED_PKGS}; do
31 filename=`ls ${PKGDATA_DIR}/runtime-reverse/${pkg}| head -1`
32 pkged_pn="$(sed -n 's/^PN: //p' ${filename})"
33
34 # check to see if the package name exists in the manifest. if so, bail.
35 if grep -q "^PACKAGE NAME: ${pkg}" ${LICENSE_MANIFEST}; then
36 continue
37 fi
38
39 pkged_pv="$(sed -n 's/^PV: //p' ${filename})"
40 pkged_name="$(basename $(readlink ${filename}))"
41 pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
42 if [ -z ${pkged_lic} ]; then
43 # fallback checking value of LICENSE
44 pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
45 fi
46
47 echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_MANIFEST}
48 echo "PACKAGE VERSION:" ${pkged_pv} >> ${LICENSE_MANIFEST}
49 echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_MANIFEST}
50 printf "LICENSE:" >> ${LICENSE_MANIFEST}
51 for lic in ${pkged_lic}; do
52 # to reference a license file trim trailing + symbol
53 if ! [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic%+}" ]; then
54 bbwarn "The license listed ${lic} was not in the licenses collected for ${pkged_pn}"
55 fi
56 printf " ${lic}" >> ${LICENSE_MANIFEST}
57 done
58 printf "\n\n" >> ${LICENSE_MANIFEST}
59 done
60
61 # Two options here:
62 # - Just copy the manifest
63 # - Copy the manifest and the license directories
64 # With both options set we see a .5 M increase in core-image-minimal
65 if [ -n "${COPY_LIC_MANIFEST}" ]; then
66 mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/
67 cp ${LICENSE_MANIFEST} ${IMAGE_ROOTFS}/usr/share/common-licenses/license.manifest
68 if [ -n "${COPY_LIC_DIRS}" ]; then
69 for pkg in ${INSTALLED_PKGS}; do
70 mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
71 for lic in `ls ${LICENSE_DIRECTORY}/${pkg}`; do
72 # Really don't need to copy the generics as they're
73 # represented in the manifest and in the actual pkg licenses
74 # Doing so would make your image quite a bit larger
75 if [[ "${lic}" != "generic_"* ]]; then
76 cp ${LICENSE_DIRECTORY}/${pkg}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
77 elif [[ "${lic}" == "generic_"* ]]; then
78 if [ ! -f ${IMAGE_ROOTFS}/usr/share/common-licenses/${lic} ]; then
79 cp ${LICENSE_DIRECTORY}/${pkg}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/
80 fi
81 ln -s ../${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
82 fi
83 done
84 done
85 fi
86 fi
87
88}
89
90python do_populate_lic() {
91 """
92 Populate LICENSE_DIRECTORY with licenses.
93 """
94 lic_files_paths = find_license_files(d)
95
96 # The base directory we wrangle licenses to
97 destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
98 copy_license_files(lic_files_paths, destdir)
99}
100
101# It would be better to copy them in do_install_append, but find_license_files is Python.
102python perform_packagecopy_prepend () {
103 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
104 if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled:
105 lic_files_paths = find_license_files(d)
106
107 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
108 destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True))
109 copy_license_files(lic_files_paths, destdir)
110 add_package_and_files(d)
111}
112
113def add_package_and_files(d):
114 packages = d.getVar('PACKAGES', True)
115 files = d.getVar('LICENSE_FILES_DIRECTORY', True)
116 pn = d.getVar('PN', True)
117 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX'))
118 if pn_lic in packages:
119        bb.warn("%s package already exists in %s." % (pn_lic, pn))
120 else:
121 # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
122 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
123 d.setVar('FILES_' + pn_lic, files)
124 rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True)
125 if rrecommends_pn:
126 d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
127 else:
128 d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
129
130def copy_license_files(lic_files_paths, destdir):
131 import shutil
132
133 bb.utils.mkdirhier(destdir)
134 for (basename, path) in lic_files_paths:
135 try:
136 ret = shutil.copyfile(path, os.path.join(destdir, basename))
137 except Exception as e:
138 bb.warn("Could not copy license file %s: %s" % (basename, e))
139
140def find_license_files(d):
141 """
142 Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
143 """
144 import shutil
145 import oe.license
146
147 pn = d.getVar('PN', True)
148    license_types = None
149    for package in d.getVar('PACKAGES', True).split():
150        pkg_license = d.getVar('LICENSE_' + package, True)
151        if pkg_license:
152            license_types = license_types + ' & ' + pkg_license \
153                            if license_types else pkg_license
154
155    # If we get here with no package-level license types, then we have
156    # only a recipe-level license, so we grab just that.
157    if not license_types:
158        # All the license types at the recipe level
159        license_types = d.getVar('LICENSE', True)
160
161 # All the license files for the package
162 lic_files = d.getVar('LIC_FILES_CHKSUM', True)
163 pn = d.getVar('PN', True)
164 # The license files are located in S/LIC_FILE_CHECKSUM.
165 srcdir = d.getVar('S', True)
166 # Directory we store the generic licenses as set in the distro configuration
167 generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
168 # List of basename, path tuples
169 lic_files_paths = []
170 license_source_dirs = []
171 license_source_dirs.append(generic_directory)
172 try:
173 additional_lic_dirs = d.getVar('LICENSE_PATH', True).split()
174 for lic_dir in additional_lic_dirs:
175 license_source_dirs.append(lic_dir)
176 except:
177 pass
178
179 class FindVisitor(oe.license.LicenseVisitor):
180 def visit_Str(self, node):
181 #
182 # Until I figure out what to do with
183 # the two modifiers I support (or greater = +
184 # and "with exceptions" being *
185 # we'll just strip out the modifier and put
186 # the base license.
187 find_license(node.s.replace("+", "").replace("*", ""))
188 self.generic_visit(node)
189
190 def find_license(license_type):
191 try:
192 bb.utils.mkdirhier(gen_lic_dest)
193 except:
194 pass
195 spdx_generic = None
196 license_source = None
197 # If the generic does not exist we need to check to see if there is an SPDX mapping to it
198 for lic_dir in license_source_dirs:
199 if not os.path.isfile(os.path.join(lic_dir, license_type)):
200 if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
201 # Great, there is an SPDXLICENSEMAP. We can copy!
202 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
203 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
204 license_source = lic_dir
205 break
206 elif os.path.isfile(os.path.join(lic_dir, license_type)):
207 spdx_generic = license_type
208 license_source = lic_dir
209 break
210
211 if spdx_generic and license_source:
212            # We really should copy to generic_ + spdx_generic; however, that ends up
213            # messing up the manifest audit. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes).
214
215 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic)))
216 else:
217 # And here is where we warn people that their licenses are lousy
218 bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
219 pass
220
221 if not generic_directory:
222 raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
223
224 if not lic_files:
225    # No recipe should have an invalid license file. This is checked elsewhere,
226    # but let's be pedantic.
227 bb.note(pn + ": Recipe file does not have license file information.")
228 return lic_files_paths
229
230 for url in lic_files.split():
231 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
232 # We want the license filename and path
233 srclicfile = os.path.join(srcdir, path)
234 lic_files_paths.append((os.path.basename(path), srclicfile))
235
236 v = FindVisitor()
237 try:
238 v.visit_string(license_types)
239 except oe.license.InvalidLicense as exc:
240 bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
241 except SyntaxError:
242        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF', True)))
243
244 return lic_files_paths
245
246def return_spdx(d, license):
247 """
248 This function returns the spdx mapping of a license if it exists.
249 """
250 return d.getVarFlag('SPDXLICENSEMAP', license, True)
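# Illustrative note: SPDXLICENSEMAP entries are varflags set in the
# distro's license configuration, along the lines of (example mapping):
#   SPDXLICENSEMAP[GPLv2] = "GPL-2.0"
# so return_spdx(d, "GPLv2") would give "GPL-2.0".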
251
252def incompatible_license(d, dont_want_licenses, package=None):
253    This function checks if a recipe has only incompatible licenses. It also takes the
254    'or' operand into consideration.
255 operand.
256 """
257 import re
258 import oe.license
259 from fnmatch import fnmatchcase as fnmatch
260 license = d.getVar("LICENSE_%s" % package, True) if package else None
261 if not license:
262 license = d.getVar('LICENSE', True)
263
264 def license_ok(license):
265 for dwl in dont_want_licenses:
266            # If you want to exclude a license named generically 'X', we
267 # surely want to exclude 'X+' as well. In consequence, we
268 # will exclude a trailing '+' character from LICENSE in
269 # case INCOMPATIBLE_LICENSE is not a 'X+' license.
270 lic = license
271 if not re.search('\+$', dwl):
272 lic = re.sub('\+', '', license)
273 if fnmatch(lic, dwl):
274 return False
275 return True
276
277 # Handles an "or" or two license sets provided by
278 # flattened_licenses(), pick one that works if possible.
279 def choose_lic_set(a, b):
280 return a if all(license_ok(lic) for lic in a) else b
281
282 try:
283 licenses = oe.license.flattened_licenses(license, choose_lic_set)
284 except oe.license.LicenseError as exc:
285 bb.fatal('%s: %s' % (d.getVar('P', True), exc))
286 return any(not license_ok(l) for l in licenses)
287
288def check_license_flags(d):
289 """
290 This function checks if a recipe has any LICENSE_FLAGs that
291 aren't whitelisted.
292
293 If it does, it returns the first LICENSE_FLAG missing from the
294 whitelist, or all the LICENSE_FLAGs if there is no whitelist.
295
296    If everything is properly whitelisted, it returns None.
297 """
298
299 def license_flag_matches(flag, whitelist, pn):
300 """
301        Return True if flag matches something in whitelist, False if not.
302
303 Before we test a flag against the whitelist, we append _${PN}
304 to it. We then try to match that string against the
305 whitelist. This covers the normal case, where we expect
306 LICENSE_FLAGS to be a simple string like 'commercial', which
307 the user typically matches exactly in the whitelist by
308        explicitly appending the package name, e.g. 'commercial_foo'.
309 If we fail the match however, we then split the flag across
310 '_' and append each fragment and test until we either match or
311 run out of fragments.
312 """
313 flag_pn = ("%s_%s" % (flag, pn))
314 for candidate in whitelist:
315 if flag_pn == candidate:
316 return True
317
318 flag_cur = ""
319 flagments = flag_pn.split("_")
320 flagments.pop() # we've already tested the full string
321 for flagment in flagments:
322 if flag_cur:
323 flag_cur += "_"
324 flag_cur += flagment
325 for candidate in whitelist:
326 if flag_cur == candidate:
327 return True
328 return False
329
330 def all_license_flags_match(license_flags, whitelist):
331 """ Return first unmatched flag, None if all flags match """
332 pn = d.getVar('PN', True)
333 split_whitelist = whitelist.split()
334 for flag in license_flags.split():
335 if not license_flag_matches(flag, split_whitelist, pn):
336 return flag
337 return None
338
339 license_flags = d.getVar('LICENSE_FLAGS', True)
340 if license_flags:
341 whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
342 if not whitelist:
343 return license_flags
344 unmatched_flag = all_license_flags_match(license_flags, whitelist)
345 if unmatched_flag:
346 return unmatched_flag
347 return None
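# A minimal sketch of the flag/whitelist interplay checked above (names
# are examples): a recipe declaring
#   LICENSE_FLAGS = "commercial"
# only builds once the user whitelists it, e.g. in local.conf:
#   LICENSE_FLAGS_WHITELIST = "commercial_gst-plugins-ugly"
# matching via the "flag_${PN}" rule described in license_flag_matches().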
348
349SSTATETASKS += "do_populate_lic"
350do_populate_lic[sstate-name] = "populate-lic"
351do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
352do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
353
354ROOTFS_POSTPROCESS_COMMAND_prepend = "license_create_manifest; "
355
356python do_populate_lic_setscene () {
357 sstate_setscene(d)
358}
359addtask do_populate_lic_setscene
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
new file mode 100644
index 0000000000..4f2b0a4a98
--- /dev/null
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -0,0 +1,32 @@
1# Parse the kernel ABI version out of utsrelease.h (falling back to <linux/version.h> on older kernels)
2def get_kernelversion(p):
3 import re
4
5 fn = p + '/include/linux/utsrelease.h'
6 if not os.path.isfile(fn):
7 # after 2.6.33-rc1
8 fn = p + '/include/generated/utsrelease.h'
9 if not os.path.isfile(fn):
10 fn = p + '/include/linux/version.h'
11
13 try:
14 f = open(fn, 'r')
15 except IOError:
16 return None
17
18 l = f.readlines()
19 f.close()
20 r = re.compile("#define UTS_RELEASE \"(.*)\"")
21 for s in l:
22 m = r.match(s)
23 if m:
24 return m.group(1)
25 return None
26
27def linux_module_packages(s, d):
28 suffix = ""
29 return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
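# Illustrative note: linux_module_packages("ath9k_htc", d) returns
# "kernel-module-ath9k-htc", following the lowercase and '_' -> '-'
# mapping above (the module name is chosen as an example).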
30
31# that's all
32
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
new file mode 100644
index 0000000000..78d65bda3a
--- /dev/null
+++ b/meta/classes/logging.bbclass
@@ -0,0 +1,72 @@
1# The following logging mechanisms are to be used in bash functions of recipes.
2# They are intended to map one to one in intention and output format with the
3# python recipe logging functions of a similar naming convention: bb.plain(),
4# bb.note(), etc.
5#
6# For the time being, all of these print only to the task logs. Future
7# enhancements may integrate these calls with the bitbake logging
8# infrastructure, allowing for printing to the console as appropriate. The
9# interface and intention statements reflect that future goal. Once it is
10# in place, no changes will be necessary to recipes using these logging
11# mechanisms.
12
13# Print the output exactly as it is passed in. Typically used for output of
14# tasks that should be seen on the console. Use sparingly.
15# Output: logs console
16# NOTE: console output is not currently implemented.
17bbplain() {
18 echo "$*"
19}
20
21# Notify the user of a noteworthy condition.
22# Output: logs console
23# NOTE: console output is not currently implemented.
24bbnote() {
25 echo "NOTE: $*"
26}
27
28# Print a warning to the log. Warnings are non-fatal, and do not
29# indicate a build failure.
30# Output: logs
31bbwarn() {
32 echo "WARNING: $*"
33}
34
35# Print an error to the log. Errors are non-fatal in that the build can
36# continue, but they do indicate a build failure.
37# Output: logs
38bberror() {
39 echo "ERROR: $*"
40}
41
42# Print a fatal error to the log. Fatal errors indicate build failure
43# and halt the build, exiting with an error code.
44# Output: logs
45bbfatal() {
46 echo "ERROR: $*"
47 exit 1
48}
49
50# Print debug messages. These are appropriate for progress checkpoint
51# messages to the logs. Depending on the debug log level, they may also
52# go to the console.
53# Output: logs console
54# Usage: bbdebug 1 "first level debug message"
55# bbdebug 2 "second level debug message"
56# NOTE: console output is not currently implemented.
57bbdebug() {
58 USAGE='Usage: bbdebug [123] "message"'
59 if [ $# -lt 2 ]; then
60 bbfatal "$USAGE"
61 fi
62
63 # Strip off the debug level and ensure it is an integer
64 DBGLVL=$1; shift
65	case "$DBGLVL" in
66		''|*[!0-9]*) bbfatal "$USAGE" ;;
67	esac
68
69 # All debug output is printed to the logs
70 echo "DEBUG: $*"
71}
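# A minimal sketch of these helpers inside a recipe's shell task (the
# file name and messages are examples):
#   do_install_append() {
#       bbnote "installing extra configuration"
#       [ -f ${WORKDIR}/extra.conf ] || bbfatal "extra.conf is missing"
#   }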
72
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
new file mode 100644
index 0000000000..d35c40bccd
--- /dev/null
+++ b/meta/classes/meta.bbclass
@@ -0,0 +1,4 @@
1
2PACKAGES = ""
3
4do_build[recrdeptask] = "do_build"
\ No newline at end of file
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
new file mode 100644
index 0000000000..8d3988ace8
--- /dev/null
+++ b/meta/classes/metadata_scm.bbclass
@@ -0,0 +1,80 @@
1METADATA_BRANCH ?= "${@base_detect_branch(d)}"
2METADATA_REVISION ?= "${@base_detect_revision(d)}"
3
4def base_detect_revision(d):
5 path = base_get_scmbasepath(d)
6
7 scms = [base_get_metadata_git_revision, \
8 base_get_metadata_svn_revision]
9
10 for scm in scms:
11 rev = scm(path, d)
12 if rev != "<unknown>":
13 return rev
14
15 return "<unknown>"
16
17def base_detect_branch(d):
18 path = base_get_scmbasepath(d)
19
20 scms = [base_get_metadata_git_branch]
21
22 for scm in scms:
23 rev = scm(path, d)
24 if rev != "<unknown>":
25 return rev.strip()
26
27 return "<unknown>"
28
29def base_get_scmbasepath(d):
30 return d.getVar( 'COREBASE', True)
31
32def base_get_metadata_monotone_branch(path, d):
33 monotone_branch = "<unknown>"
34 try:
35 with open("%s/_MTN/options" % path) as f:
36 monotone_branch = f.read().strip()
37 if monotone_branch.startswith( "database" ):
38 monotone_branch_words = monotone_branch.split()
39 monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
40    except IOError:
41 pass
42 return monotone_branch
43
44def base_get_metadata_monotone_revision(path, d):
45 monotone_revision = "<unknown>"
46 try:
47 with open("%s/_MTN/revision" % path) as f:
48 monotone_revision = f.read().strip()
49 if monotone_revision.startswith( "format_version" ):
50 monotone_revision_words = monotone_revision.split()
51 monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
52 except IOError:
53 pass
54 return monotone_revision
55
56def base_get_metadata_svn_revision(path, d):
57 revision = "<unknown>"
58 try:
59 with open("%s/.svn/entries" % path) as f:
60 revision = f.readlines()[3].strip()
61 except IOError:
62 pass
63 return revision
64
65def base_get_metadata_git_branch(path, d):
66 branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read()
67
68 if len(branch) != 0:
69 return branch
70 return "<unknown>"
71
72def base_get_metadata_git_revision(path, d):
73 f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path)
74 data = f.read()
75 if f.close() is None:
76 rev = data.split(" ")[0]
77 if len(rev) != 0:
78 return rev
79 return "<unknown>"
80
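# Illustrative note: with COREBASE under git, METADATA_BRANCH and
# METADATA_REVISION above evaluate to something like "master" and the
# current HEAD commit hash; outside an SCM checkout both fall back to
# "<unknown>". A hypothetical use in a build banner:
#   BUILDNAME = "${METADATA_BRANCH}-${METADATA_REVISION}"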
diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass
new file mode 100644
index 0000000000..aa0df8bb76
--- /dev/null
+++ b/meta/classes/migrate_localcount.bbclass
@@ -0,0 +1,46 @@
1PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
2LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
3
4python migrate_localcount_handler () {
5 import bb.event
6 if not e.data:
7 return
8
9 pv = e.data.getVar('PV', True)
10 if not 'AUTOINC' in pv:
11 return
12
13 localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
14 pn = e.data.getVar('PN', True)
15 revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
16 counts = localcounts.get_by_pattern('%%-%s_count' % pn)
17 if not revs or not counts:
18 return
19
20 if len(revs) != len(counts):
21 bb.warn("The number of revs and localcounts don't match in %s" % pn)
22 return
23
24 version = e.data.getVar('PRAUTOINX', True)
25 srcrev = bb.fetch2.get_srcrev(e.data)
26 base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
27 pkgarch = e.data.getVar('PACKAGE_ARCH', True)
28 value = max(int(count) for count in counts)
29
30 if len(revs) == 1:
31 if srcrev != ('AUTOINC+%s' % revs[0]):
32 value += 1
33 else:
34 value += 1
35
36 bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True))
37 df = e.data.getVar('LOCALCOUNT_DUMPFILE', True)
38 flock = bb.utils.lockfile("%s.lock" % df)
39 with open(df, 'a') as fd:
40 fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
41 (base_ver, pkgarch, srcrev, str(value)))
42 bb.utils.unlockfile(flock)
43}
44
45addhandler migrate_localcount_handler
46migrate_localcount_handler[eventmask] = "bb.event.RecipeParsed"
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
new file mode 100644
index 0000000000..690610e49d
--- /dev/null
+++ b/meta/classes/mime.bbclass
@@ -0,0 +1,58 @@
1DEPENDS += "shared-mime-info-native shared-mime-info"
2
3EXTRA_OECONF += "--disable-update-mimedb"
4
5mime_postinst() {
6if [ "$1" = configure ]; then
7 UPDATEMIMEDB=`which update-mime-database`
8 if [ -x "$UPDATEMIMEDB" ] ; then
9 echo "Updating MIME database... this may take a while."
10 $UPDATEMIMEDB $D${datadir}/mime
11 else
12 echo "Missing update-mime-database, update of mime database failed!"
13 exit 1
14 fi
15fi
16}
17
18mime_postrm() {
19if [ "$1" = remove ] || [ "$1" = upgrade ]; then
20 UPDATEMIMEDB=`which update-mime-database`
21 if [ -x "$UPDATEMIMEDB" ] ; then
22 echo "Updating MIME database... this may take a while."
23 $UPDATEMIMEDB $D${datadir}/mime
24 else
25 echo "Missing update-mime-database, update of mime database failed!"
26 exit 1
27 fi
28fi
29}
30
31python populate_packages_append () {
32 import re
33 packages = d.getVar('PACKAGES', True).split()
34 pkgdest = d.getVar('PKGDEST', True)
35
36 for pkg in packages:
37 mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
38 mimes = []
39 mime_re = re.compile(".*\.xml$")
40 if os.path.exists(mime_dir):
41 for f in os.listdir(mime_dir):
42 if mime_re.match(f):
43 mimes.append(f)
44 if mimes:
45 bb.note("adding mime postinst and postrm scripts to %s" % pkg)
46 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
47 if not postinst:
48 postinst = '#!/bin/sh\n'
49 postinst += d.getVar('mime_postinst', True)
50 d.setVar('pkg_postinst_%s' % pkg, postinst)
51 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
52 if not postrm:
53 postrm = '#!/bin/sh\n'
54 postrm += d.getVar('mime_postrm', True)
55 d.setVar('pkg_postrm_%s' % pkg, postrm)
56 bb.note("adding shared-mime-info-data dependency to %s" % pkg)
57 d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
58}
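# A minimal sketch of using this class (the recipe name is an example): a
# recipe that installs *.xml files into ${datadir}/mime/packages adds
#   inherit mime
# and the affected packages automatically gain the postinst/postrm above
# plus an RDEPENDS on shared-mime-info-data.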
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
new file mode 100644
index 0000000000..1fd7cd88a7
--- /dev/null
+++ b/meta/classes/mirrors.bbclass
@@ -0,0 +1,68 @@
1MIRRORS += "\
2${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
3${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
4${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
5${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
6${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
7${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
8${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
9${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
10${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
11${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
12${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
13${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
14${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
15${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
16${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
17${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
18${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
19${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
20${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
21${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
22${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
23${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
24ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
25ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
26ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
27ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
28ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
29ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
30ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
31ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/ \n \
32ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/ \n \
33ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/ \n \
34http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
35http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
36ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
37ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
38ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
39ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
40ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
41ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
42ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
43ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
44ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
45ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
46http://www.apache.org/dist http://archive.apache.org/dist \n \
47http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
48cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
49svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
50git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
51hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
52bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
53svk://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
54p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
55osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
56https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
57ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
58cvs://.*/.* http://sources.openembedded.org/ \n \
59svn://.*/.* http://sources.openembedded.org/ \n \
60git://.*/.* http://sources.openembedded.org/ \n \
61hg://.*/.* http://sources.openembedded.org/ \n \
62bzr://.*/.* http://sources.openembedded.org/ \n \
63svk://.*/.* http://sources.openembedded.org/ \n \
64p4://.*/.* http://sources.openembedded.org/ \n \
65osc://.*/.* http://sources.openembedded.org/ \n \
66https?$://.*/.* http://sources.openembedded.org/ \n \
67ftp://.*/.* http://sources.openembedded.org/ \n \
68"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
new file mode 100644
index 0000000000..9dbb4b424b
--- /dev/null
+++ b/meta/classes/module-base.bbclass
@@ -0,0 +1,23 @@
1inherit module_strip
2
3inherit kernel-arch
4
5export OS = "${TARGET_OS}"
6export CROSS_COMPILE = "${TARGET_PREFIX}"
7
8export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
9KERNEL_OBJECT_SUFFIX = ".ko"
10
11# kernel modules are generally machine specific
12PACKAGE_ARCH = "${MACHINE_ARCH}"
13
14#
15# Ensure the hostprogs are available for module compilation. Modules that
16# inherit this class and override do_compile() should be sure to call
17# do_make_scripts() or ensure the scripts are built independently.
18#
19do_make_scripts() {
20 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
21 make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
22 -C ${STAGING_KERNEL_DIR} scripts
23}
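As the comment above says, a recipe that replaces do_compile() should still build the kernel host tools first. A hedged sketch of that pattern (the make invocation is illustrative; a real module's Makefile may differ):

do_compile() {
    do_make_scripts
    unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
    oe_runmake -C ${STAGING_KERNEL_DIR} M=${S} modules
}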
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
new file mode 100644
index 0000000000..ad6f7af1bb
--- /dev/null
+++ b/meta/classes/module.bbclass
@@ -0,0 +1,32 @@
1DEPENDS += "virtual/kernel"
2
3inherit module-base kernel-module-split
4
5addtask make_scripts after do_patch before do_compile
6do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
7do_make_scripts[deptask] = "do_populate_sysroot"
8
9module_do_compile() {
10 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
11 oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
12 KERNEL_SRC=${STAGING_KERNEL_DIR} \
13 KERNEL_VERSION=${KERNEL_VERSION} \
14 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
15 AR="${KERNEL_AR}" \
16 ${MAKE_TARGETS}
17}
18
19module_do_install() {
20 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
21 oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
22 KERNEL_SRC=${STAGING_KERNEL_DIR} \
23 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
24 modules_install
25}
26
27EXPORT_FUNCTIONS do_compile do_install
28
29# Add all split modules to PN RDEPENDS; PN may be empty at this point
30KERNEL_MODULES_META_PACKAGE = "${PN}"
31FILES_${PN} = ""
32ALLOW_EMPTY_${PN} = "1"
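With the compile and install logic exported above, an out-of-tree module recipe can stay very small. A hypothetical example (recipe and file names are made up; MAKE_TARGETS defaults to the Makefile's default target):

SUMMARY = "Example out-of-tree kernel module"
LICENSE = "GPLv2"
inherit module
SRC_URI = "file://Makefile \
           file://hello-mod.c \
          "
S = "${WORKDIR}"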
diff --git a/meta/classes/module_strip.bbclass b/meta/classes/module_strip.bbclass
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/meta/classes/module_strip.bbclass
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
new file mode 100644
index 0000000000..9503096245
--- /dev/null
+++ b/meta/classes/multilib.bbclass
@@ -0,0 +1,140 @@
1python multilib_virtclass_handler () {
2 cls = e.data.getVar("BBEXTENDCURR", True)
3 variant = e.data.getVar("BBEXTENDVARIANT", True)
4 if cls != "multilib" or not variant:
5 return
6
7 e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True))
8
9 # There should only be one kernel in multilib configs
10 # We also skip multilib setup for module packages.
11 provides = (e.data.getVar("PROVIDES", True) or "").split()
12 if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
13 raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
14
15 save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
16 for name in save_var_name.split():
17 val=e.data.getVar(name, True)
18 if val:
19 e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
20
21 if bb.data.inherits_class('image', e.data):
22 e.data.setVar("MLPREFIX", variant + "-")
23 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
24 return
25
26 if bb.data.inherits_class('cross-canadian', e.data):
27 e.data.setVar("MLPREFIX", variant + "-")
28 override = ":virtclass-multilib-" + variant
29 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
30 bb.data.update_data(e.data)
31 return
32
33 if bb.data.inherits_class('native', e.data):
34 raise bb.parse.SkipPackage("We can't extend native recipes")
35
36 if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
37 raise bb.parse.SkipPackage("We can't extend nativesdk recipes")
38
39 if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
40 raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups")
41
42
43 # Expand this since this won't work correctly once we set a multilib into place
44 e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
45
46 override = ":virtclass-multilib-" + variant
47
48 e.data.setVar("MLPREFIX", variant + "-")
49 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
50 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
51
52 # Expand the WHITELISTs with multilib prefix
53 for whitelist in ["HOSTTOOLS_WHITELIST_GPLv3", "WHITELIST_GPLv3", "LGPLv2_WHITELIST_GPLv3"]:
54 pkgs = e.data.getVar(whitelist, True)
55 for pkg in pkgs.split():
56 pkgs += " " + variant + "-" + pkg
57 e.data.setVar(whitelist, pkgs)
58
59 # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
60 newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
61 if newtune:
62 e.data.setVar("DEFAULTTUNE", newtune)
63}
64
65addhandler multilib_virtclass_handler
66multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
67
68STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
69
70python __anonymous () {
71 variant = d.getVar("BBEXTENDVARIANT", True)
72
73 import oe.classextend
74
75 clsextend = oe.classextend.ClassExtender(variant, d)
76
77 if bb.data.inherits_class('image', d):
78 clsextend.map_depends_variable("PACKAGE_INSTALL")
79 clsextend.map_depends_variable("LINGUAS_INSTALL")
80 clsextend.map_depends_variable("RDEPENDS")
81 pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
82 d.setVar("PACKAGE_INSTALL", pinstall)
83 d.setVar("LINGUAS_INSTALL", "")
84 # FIXME, we need to map this to something, not delete it!
85 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
86
87 if bb.data.inherits_class('populate_sdk_base', d):
88 clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK")
89 clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY")
90
91 if bb.data.inherits_class('image', d):
92 return
93
94 clsextend.map_depends_variable("DEPENDS")
95 clsextend.map_variable("PROVIDES")
96
97 if bb.data.inherits_class('cross-canadian', d):
98 return
99
100 clsextend.rename_packages()
101 clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
102
103 clsextend.map_packagevars()
104 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
105 clsextend.map_variable("PACKAGE_INSTALL")
106 clsextend.map_variable("INITSCRIPT_PACKAGES")
107 clsextend.map_variable("USERADD_PACKAGES")
108}
109
110PACKAGEFUNCS_append = " do_package_qa_multilib"
111
112python do_package_qa_multilib() {
113
114 def check_mlprefix(pkg, var, mlprefix):
115 values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
116 candidates = []
117 for i in values:
118 if i.startswith('virtual/'):
119 i = i[len('virtual/'):]
120 if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
121 (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
122 (not i.startswith("rtld")):
123 candidates.append(i)
124 if len(candidates) > 0:
125 bb.warn("Multilib QA Issue: %s package %s - suspicious values '%s' in %s"
126 % (d.getVar('PN', True), pkg, ' '.join(candidates), var))
127
128 ml = d.getVar('MLPREFIX', True)
129 if not ml:
130 return
131
132 packages = d.getVar('PACKAGES', True)
133 for pkg in packages.split():
134 check_mlprefix(pkg, 'RDEPENDS', ml)
135 check_mlprefix(pkg, 'RPROVIDES', ml)
136 check_mlprefix(pkg, 'RRECOMMENDS', ml)
137 check_mlprefix(pkg, 'RSUGGESTS', ml)
138 check_mlprefix(pkg, 'RREPLACES', ml)
139 check_mlprefix(pkg, 'RCONFLICTS', ml)
140}
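The handler above keys off BBEXTENDVARIANT and DEFAULTTUNE_virtclass-multilib-<variant>, which come from the multilib configuration. A typical local.conf fragment for an x86-64 machine might look like this (the tune value is an assumption that depends on the machine):

require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"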
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
new file mode 100644
index 0000000000..3315ba9327
--- /dev/null
+++ b/meta/classes/multilib_global.bbclass
@@ -0,0 +1,47 @@
1python multilib_virtclass_handler_global () {
2 if not e.data:
3 return
4
5 if isinstance(e, bb.event.RecipePreFinalise):
6 for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
7 if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
8 e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
9
10 variant = e.data.getVar("BBEXTENDVARIANT", True)
11
12 if isinstance(e, bb.event.RecipeParsed) and not variant:
13 if bb.data.inherits_class('kernel', e.data) or \
14 bb.data.inherits_class('module-base', e.data) or \
15 (bb.data.inherits_class('allarch', e.data) and\
16 not bb.data.inherits_class('packagegroup', e.data)):
17 variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
18
19 import oe.classextend
20 clsextends = []
21 for variant in variants:
22 clsextends.append(oe.classextend.ClassExtender(variant, e.data))
23
24 # Process PROVIDES
25 origprovs = provs = e.data.getVar("PROVIDES", True) or ""
26 for clsextend in clsextends:
27 provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
28 e.data.setVar("PROVIDES", provs)
29
30 # Process RPROVIDES
31 origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
32 for clsextend in clsextends:
33 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
34 e.data.setVar("RPROVIDES", rprovs)
35
36 # Process RPROVIDES_${PN}...
37 for pkg in (e.data.getVar("PACKAGES", True) or "").split():
38 origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
39 for clsextend in clsextends:
40 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
41 rprovs = rprovs + " " + clsextend.extname + "-" + pkg
42 e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
43}
44
45addhandler multilib_virtclass_handler_global
46multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed"
47
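The effect of the RecipeParsed branch is easiest to see on PROVIDES. As an illustration (the exact mapped names come from oe.classextend):

# With MULTILIBS = "multilib:lib32", a kernel recipe declaring:
PROVIDES = "virtual/kernel"
# is extended by this handler to also carry the variant name:
#   virtual/kernel virtual/lib32-kernel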
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
new file mode 100644
index 0000000000..4d049a82e6
--- /dev/null
+++ b/meta/classes/multilib_header.bbclass
@@ -0,0 +1,47 @@
1inherit siteinfo
2
3# If applicable on the architecture, this routine will rename the header and
4# add a unique identifier to the name for the ABI/bitsize that is being used.
5# A wrapper will be generated for the architecture that knows how to call
6# all of the ABI variants for that given architecture.
7#
8oe_multilib_header() {
9 # Pick the ABI/bitsize identifier used to rename the headers:
10 # For ARM: we don't support multilib builds, so do nothing.
11 # For MIPS: "n32" is a special case, which needs to be
12 # distinct from both 64-bit and 32-bit.
13 case ${TARGET_ARCH} in
14 arm*) return
15 ;;
16 mips*) case "${MIPSPKGSFX_ABI}" in
17 "-n32")
18 ident=n32
19 ;;
20 *)
21 ident=${SITEINFO_BITS}
22 ;;
23 esac
24 ;;
25 *) ident=${SITEINFO_BITS}
26 esac
27 if echo ${TARGET_ARCH} | grep -q arm; then
28 return
29 fi
30 for each_header in "$@" ; do
31 if [ ! -f "${D}/${includedir}/$each_header" ]; then
32 bberror "oe_multilib_header: Unable to find header $each_header."
33 continue
34 fi
35 stem=$(echo $each_header | sed 's#\.h$##')
36 # Rename the header to carry the ABI/bitsize identifier
37 mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
38
39 sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
40 done
41}
42
43# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
44# We don't need multilib headers for native builds so brute force things.
45oe_multilib_header_class-native () {
46 return
47}
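A recipe that installs an ABI-dependent header calls the helper from its install step; the header path here is illustrative:

do_install_append() {
    oe_multilib_header openssl/opensslconf.h
}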
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
new file mode 100644
index 0000000000..7c4e8c35c6
--- /dev/null
+++ b/meta/classes/native.bbclass
@@ -0,0 +1,163 @@
1# We want native packages to be relocatable
2inherit relocatable
3
4# Native packages are built indirectly via dependency,
5# no need for them to be a direct target of 'world'
6EXCLUDE_FROM_WORLD = "1"
7
8PACKAGES = ""
9PACKAGES_class-native = ""
10PACKAGES_DYNAMIC = ""
11PACKAGES_DYNAMIC_class-native = ""
12PACKAGE_ARCH = "${BUILD_ARCH}"
13
14# used by cmake class
15OECMAKE_RPATH = "${libdir}"
16OECMAKE_RPATH_class-native = "${libdir}"
17
18# When this class has packaging enabled, setting
19# RPROVIDES becomes unnecessary.
20RPROVIDES = "${PN}"
21
22TARGET_ARCH = "${BUILD_ARCH}"
23TARGET_OS = "${BUILD_OS}"
24TARGET_VENDOR = "${BUILD_VENDOR}"
25TARGET_PREFIX = "${BUILD_PREFIX}"
26TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
27TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
28TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
29TARGET_FPU = ""
30
31HOST_ARCH = "${BUILD_ARCH}"
32HOST_OS = "${BUILD_OS}"
33HOST_VENDOR = "${BUILD_VENDOR}"
34HOST_PREFIX = "${BUILD_PREFIX}"
35HOST_CC_ARCH = "${BUILD_CC_ARCH}"
36HOST_LD_ARCH = "${BUILD_LD_ARCH}"
37HOST_AS_ARCH = "${BUILD_AS_ARCH}"
38
39CPPFLAGS = "${BUILD_CPPFLAGS}"
40CFLAGS = "${BUILD_CFLAGS}"
41CXXFLAGS = "${BUILD_CFLAGS}"
42LDFLAGS = "${BUILD_LDFLAGS}"
43LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
44
45STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
46STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
47
48# Native packages don't need TOOLCHAIN_OPTIONS.
49TOOLCHAIN_OPTIONS = ""
50
51DEPENDS_GETTEXT = "gettext-native"
52
53# Don't build ptest natively
54PTEST_ENABLED = "0"
55
56# Don't use site files for native builds
57export CONFIG_SITE = "${COREBASE}/meta/site/native"
58
59# set the compiler as well. It could have been set to something else
60export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
61export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
62export F77 = "${CCACHE}${HOST_PREFIX}g77 ${HOST_CC_ARCH}"
63export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
64export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
65export CCLD = "${CC}"
66export AR = "${HOST_PREFIX}ar"
67export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
68export RANLIB = "${HOST_PREFIX}ranlib"
69export STRIP = "${HOST_PREFIX}strip"
70
71# Path prefixes
72base_prefix = "${STAGING_DIR_NATIVE}"
73prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
74exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
75
76libdir = "${STAGING_DIR_NATIVE}${libdir_native}"
77
78baselib = "lib"
79
80# Libtool's default paths are correct for the native machine
81lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
82
83NATIVE_PACKAGE_PATH_SUFFIX ?= ""
84bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
85libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
86libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
87
88do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
89do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/"
90
91# Since we actually install these in situ there is no staging prefix
92STAGING_DIR_HOST = ""
93STAGING_DIR_TARGET = ""
94PKG_CONFIG_DIR = "${libdir}/pkgconfig"
95
96EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
97PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
98PKG_CONFIG_SYSROOT_DIR = ""
99
100# We don't want libc-uclibc or libc-glibc to kick in for native recipes
101LIBCOVERRIDE = ""
102CLASSOVERRIDE = "class-native"
103
104PATH_prepend = "${COREBASE}/scripts/native-intercept:"
105
106python native_virtclass_handler () {
107 classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
108 if "native" not in classextend:
109 return
110
111 pn = e.data.getVar("PN", True)
112 if not pn.endswith("-native"):
113 return
114
115 def map_dependencies(varname, d, suffix = ""):
116 if suffix:
117 varname = varname + "_" + suffix
118 deps = d.getVar(varname, True)
119 if not deps:
120 return
121 deps = bb.utils.explode_deps(deps)
122 newdeps = []
123 for dep in deps:
124 if dep.endswith("-cross"):
125 newdeps.append(dep.replace("-cross", "-native"))
126 elif not dep.endswith("-native"):
127 newdeps.append(dep + "-native")
128 else:
129 newdeps.append(dep)
130 d.setVar(varname, " ".join(newdeps))
131
132 map_dependencies("DEPENDS", e.data)
133 for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
134 map_dependencies("RDEPENDS", e.data, pkg)
135 map_dependencies("RRECOMMENDS", e.data, pkg)
136 map_dependencies("RSUGGESTS", e.data, pkg)
137 map_dependencies("RPROVIDES", e.data, pkg)
138 map_dependencies("RREPLACES", e.data, pkg)
139
140 provides = e.data.getVar("PROVIDES", True)
141 for prov in provides.split():
142 if prov.find(pn) != -1:
143 continue
144 if not prov.endswith("-native"):
145 provides = provides.replace(prov, prov + "-native")
146 e.data.setVar("PROVIDES", provides)
147
148 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
149}
150
151addhandler native_virtclass_handler
152native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
153
154do_package[noexec] = "1"
155do_packagedata[noexec] = "1"
156do_package_write_ipk[noexec] = "1"
157do_package_write_deb[noexec] = "1"
158do_package_write_rpm[noexec] = "1"
159
160do_packagedata[stamp-extra-info] = ""
161do_populate_sysroot[stamp-extra-info] = ""
162
163USE_NLS = "no"
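Rather than inheriting this class directly, most recipes opt in through BBCLASSEXTEND, which is what native_virtclass_handler above keys off. A sketch:

BBCLASSEXTEND = "native"
# A DEPENDS entry such as "zlib" is rewritten to "zlib-native"
# for the ${PN}-native variant by map_dependencies().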
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
new file mode 100644
index 0000000000..ed276ef0a4
--- /dev/null
+++ b/meta/classes/nativesdk.bbclass
@@ -0,0 +1,92 @@
1# SDK packages are built either explicitly by the user,
2# or indirectly via dependency. No need to be in 'world'.
3EXCLUDE_FROM_WORLD = "1"
4
5STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
6
7# We don't want libc-uclibc or libc-glibc to kick in for nativesdk recipes
8LIBCOVERRIDE = ""
9CLASSOVERRIDE = "class-nativesdk"
10
11#
12# Update PACKAGE_ARCH and PACKAGE_ARCHS
13#
14PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
15PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
16
17#
18# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
19# binaries
20#
21DEPENDS_append = " chrpath-replacement-native"
22EXTRANATIVEPATH += "chrpath-native"
23
24STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
25STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
26
27HOST_ARCH = "${SDK_ARCH}"
28HOST_VENDOR = "${SDK_VENDOR}"
29HOST_OS = "${SDK_OS}"
30HOST_PREFIX = "${SDK_PREFIX}"
31HOST_CC_ARCH = "${SDK_CC_ARCH}"
32HOST_LD_ARCH = "${SDK_LD_ARCH}"
33HOST_AS_ARCH = "${SDK_AS_ARCH}"
34#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
35
36TARGET_ARCH = "${SDK_ARCH}"
37TARGET_VENDOR = "${SDK_VENDOR}"
38TARGET_OS = "${SDK_OS}"
39TARGET_PREFIX = "${SDK_PREFIX}"
40TARGET_CC_ARCH = "${SDK_CC_ARCH}"
41TARGET_LD_ARCH = "${SDK_LD_ARCH}"
42TARGET_AS_ARCH = "${SDK_AS_ARCH}"
43TARGET_FPU = ""
44EXTRA_OECONF_FPU = ""
45
46CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
47CFLAGS = "${BUILDSDK_CFLAGS}"
48CXXFLAGS = "${BUILDSDK_CFLAGS}"
49LDFLAGS = "${BUILDSDK_LDFLAGS}"
50
51# Change to place files in SDKPATH
52base_prefix = "${SDKPATHNATIVE}"
53prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
54exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
55baselib = "lib"
56
57export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
58export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
59
60python nativesdk_virtclass_handler () {
61 pn = e.data.getVar("PN", True)
62 if not pn.endswith("-nativesdk") or pn.startswith("nativesdk-"):
63 return
64
65 e.data.setVar("MLPREFIX", "nativesdk-")
66 e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", "").replace("nativesdk-", ""))
67 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
68}
69
70python () {
71 pn = d.getVar("PN", True)
72 if not pn.startswith("nativesdk-"):
73 return
74
75 import oe.classextend
76
77 clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
78 clsextend.rename_packages()
79 clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
80
81 clsextend.map_depends_variable("DEPENDS")
82 clsextend.map_packagevars()
83 clsextend.map_variable("PROVIDES")
84}
85
86addhandler nativesdk_virtclass_handler
87nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
88
89do_populate_sysroot[stamp-extra-info] = ""
90do_packagedata[stamp-extra-info] = ""
91
92USE_NLS = "${SDKUSE_NLS}"
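As with native, the usual entry point is BBCLASSEXTEND; the anonymous python above then renames the extended recipe and its packages to nativesdk-${PN}:

BBCLASSEXTEND = "nativesdk"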
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
new file mode 100644
index 0000000000..f2e7540dcf
--- /dev/null
+++ b/meta/classes/oelint.bbclass
@@ -0,0 +1,174 @@
1addtask lint before do_fetch
2do_lint[nostamp] = "1"
3python do_lint() {
4 def testVar(var, explain=None):
5 s = d.getVar(var, True)
6 if s is not None:
7 return s
8 bb.error("%s is not set" % var)
9 if explain:
10 bb.note(explain)
11 return None
12
13
14 ##############################
15 # Test that DESCRIPTION exists
16 #
17 testVar("DESCRIPTION")
18
19
20 ##############################
21 # Test that HOMEPAGE exists
22 #
23 s = testVar("HOMEPAGE")
24 if s=="unknown":
25 bb.error("HOMEPAGE is not set")
26 elif s and not s.startswith("http://"):
27 bb.error("HOMEPAGE doesn't start with http://")
28
29
30
31 ##############################
32 # Test for valid LICENSE
33 #
34 valid_licenses = {
35 "GPL-2" : "GPLv2",
36 "GPL LGPL FDL" : True,
37 "GPL PSF" : True,
38 "GPL/QPL" : True,
39 "GPL" : True,
40 "GPLv2" : True,
41 "IBM" : True,
42 "LGPL GPL" : True,
43 "LGPL" : True,
44 "MIT" : True,
45 "OSL" : True,
46 "Perl" : True,
47 "Public Domain" : True,
48 "QPL" : "GPL/QPL",
49 }
50 s = testVar("LICENSE")
51 if s=="unknown":
52 bb.error("LICENSE is not set")
53 elif s.startswith("Vendor"):
54 pass
55 else:
56 try:
57 newlic = valid_licenses[s]
58 if newlic == False:
59 bb.note("LICENSE '%s' is not recommended" % s)
60 elif newlic != True:
61 bb.note("LICENSE '%s' is not recommended, better use '%s'" % (s, newsect))
62 except:
63 bb.note("LICENSE '%s' is not recommended" % s)
64
65
66 ##############################
67 # Test for valid MAINTAINER
68 #
69 s = testVar("MAINTAINER")
70 if s=="OpenEmbedded Team <openembedded-devel@openembedded.org>":
71 bb.error("explicit MAINTAINER is missing, using default")
72 elif s and s.find("@") == -1:
73 bb.error("You forgot to put an e-mail address into MAINTAINER")
74
75
76 ##############################
77 # Test for valid SECTION
78 #
79 # if Correct section: True section name is valid
80 # False section name is invalid, no suggestion
81 # string section name is invalid, better name suggested
82 #
83 valid_sections = {
84 # Current Section Correct section
85 "apps" : True,
86 "audio" : True,
87 "base" : True,
88 "console/games" : True,
89 "console/net" : "console/network",
90 "console/network" : True,
91 "console/utils" : True,
92 "devel" : True,
93 "developing" : "devel",
94 "devel/python" : True,
95 "fonts" : True,
96 "games" : True,
97 "games/libs" : True,
98 "gnome/base" : True,
99 "gnome/libs" : True,
100 "gpe" : True,
101 "gpe/libs" : True,
102 "gui" : False,
103 "libc" : "libs",
104 "libs" : True,
105 "libs/net" : True,
106 "multimedia" : True,
107 "net" : "network",
108 "NET" : "network",
109 "network" : True,
110 "opie/applets" : True,
111 "opie/applications" : True,
112 "opie/base" : True,
113 "opie/codecs" : True,
114 "opie/decorations" : True,
115 "opie/fontfactories" : True,
116 "opie/fonts" : True,
117 "opie/games" : True,
118 "opie/help" : True,
119 "opie/inputmethods" : True,
120 "opie/libs" : True,
121 "opie/multimedia" : True,
122 "opie/pim" : True,
123 "opie/setting" : "opie/settings",
124 "opie/settings" : True,
125 "opie/Shell" : False,
126 "opie/styles" : True,
127 "opie/today" : True,
128 "scientific" : True,
129 "utils" : True,
130 "x11" : True,
131 "x11/libs" : True,
132 "x11/wm" : True,
133 }
134 s = testVar("SECTION")
135 if s:
136 try:
137 newsect = valid_sections[s]
138 if newsect == False:
139 bb.note("SECTION '%s' is not recommended" % s)
140 elif newsect != True:
141 bb.note("SECTION '%s' is not recommended, better use '%s'" % (s, newsect))
142 except KeyError:
143 bb.note("SECTION '%s' is not recommended" % s)
144
145 if not s.islower():
146 bb.error("SECTION should only use lower case")
147
148
149
150
151 ##############################
152 # Test for valid PRIORITY
153 #
154 valid_priorities = {
155 "standard" : True,
156 "required" : True,
157 "optional" : True,
158 "extra" : True,
159 }
160 s = testVar("PRIORITY")
161 if s:
162 try:
163 newprio = valid_priorities[s]
164 if newprio == False:
165 bb.note("PRIORITY '%s' is not recommended" % s)
166 elif newprio != True:
167 bb.note("PRIORITY '%s' is not recommended, better use '%s'" % (s, newprio))
168 except KeyError:
169 bb.note("PRIORITY '%s' is not recommended" % s)
170
171 if not s.islower():
172 bb.error("PRIORITY should only use lower case")
173
174}
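The lint task is opt-in. One way to use it (the recipe name is only an example):

INHERIT += "oelint"
# then, from the build directory:
#   bitbake -c lint busybox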
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
new file mode 100644
index 0000000000..8a6feaf4d5
--- /dev/null
+++ b/meta/classes/own-mirrors.bbclass
@@ -0,0 +1,12 @@
1PREMIRRORS() {
2cvs://.*/.* ${SOURCE_MIRROR_URL}
3svn://.*/.* ${SOURCE_MIRROR_URL}
4git://.*/.* ${SOURCE_MIRROR_URL}
5hg://.*/.* ${SOURCE_MIRROR_URL}
6bzr://.*/.* ${SOURCE_MIRROR_URL}
7svk://.*/.* ${SOURCE_MIRROR_URL}
8p4://.*/.* ${SOURCE_MIRROR_URL}
9osc://.*/.* ${SOURCE_MIRROR_URL}
10https?$://.*/.* ${SOURCE_MIRROR_URL}
11ftp://.*/.* ${SOURCE_MIRROR_URL}
12}
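This class only makes sense together with SOURCE_MIRROR_URL; example.com below stands in for a real mirror:

INHERIT += "own-mirrors"
SOURCE_MIRROR_URL = "http://example.com/source-mirror"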
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
new file mode 100644
index 0000000000..5b1e902c07
--- /dev/null
+++ b/meta/classes/package.bbclass
@@ -0,0 +1,1983 @@
1#
2# Packaging process
3#
4# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
5# taking D and splitting it up into the packages listed in PACKAGES, placing the
6# resulting output in PKGDEST.
7#
8# There are the following default steps but PACKAGEFUNCS can be extended:
9#
10# a) package_get_auto_pr - get PRAUTO from remote PR service
11#
12# b) perform_packagecopy - Copy D into PKGD
13#
14# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
15#
16# d) split_and_strip_files - split the files into runtime and debug components and strip them.
17# Debug files include the split-out debug info and associated sources that end up in -dbg packages
18#
19# e) fixup_perms - Fix up permissions in the package before we split it.
20#
21# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
22# Also triggers the binary stripping code to put files in -dbg packages.
23#
24# g) package_do_filedeps - Collect perfile run-time dependency metadata
25# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
26# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
27#
28# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
29# dependencies found. Also stores the package name so anyone else using this library
30# knows which package to depend on.
31#
32# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
33#
34# j) read_shlibdeps - Reads the stored shlibs information into the metadata
35#
36# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
37#
38# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
39# packaging steps
40
41inherit packagedata
42inherit prserv
43inherit chrpath
44
45# Need the package_qa_handle_error() in insane.bbclass
46inherit insane
47
48PKGD = "${WORKDIR}/package"
49PKGDEST = "${WORKDIR}/packages-split"
50
51LOCALE_SECTION ?= ''
52
53ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
54
55# rpm is used for the per-file dependency identification
56PACKAGE_DEPENDS += "rpm-native"
57
58def legitimize_package_name(s):
59 """
60 Make sure package names are legitimate strings
61 """
62 import re
63
64 def fixutf(m):
65 cp = m.group(1)
66 if cp:
67 return ('\u%s' % cp).decode('unicode_escape').encode('utf-8')
68
69 # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
70 s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
71
72 # Remaining package name validity fixes
73 return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
74
75def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False):
76 """
77 Used in .bb files to split up dynamically generated subpackages of a
78 given package, usually plugins or modules.
79
80 Arguments:
81 root -- the path in which to search
82 file_regex -- regular expression to match searched files. Use
83 parentheses () to mark the part of this expression
84 that should be used to derive the module name (to be
85 substituted where %s is used in other function
86 arguments as noted below)
87 output_pattern -- pattern to use for the package names. Must include %s.
88 description -- description to set for each package. Must include %s.
89 postinst -- postinstall script to use for all packages (as a
90 string)
91 recursive -- True to perform a recursive search - default False
92 hook -- a hook function to be called for every match. The
93 function will be called with the following arguments
94 (in the order listed):
95 f: full path to the file/directory match
96 pkg: the package name
97 file_regex: as above
98 output_pattern: as above
99 modulename: the module name derived using file_regex
100 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
101 all packages. The default value of None causes a
102 dependency on the main package (${PN}) - if you do
103 not want this, pass '' for this parameter.
104 aux_files_pattern -- extra item(s) to be added to FILES for each
105 package. Can be a single string item or a list of
106 strings for multiple items. Must include %s.
107 postrm -- postrm script to use for all packages (as a string)
108 allow_dirs -- True allow directories to be matched - default False
109 prepend -- if True, prepend created packages to PACKAGES instead
110 of the default False which appends them
111 match_path -- match file_regex on the whole relative path to the
112 root rather than just the file name
113 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
114 each package, using the actual derived module name
115 rather than converting it to something legal for a
116 package name. Can be a single string item or a list
117 of strings for multiple items. Must include %s.
118 allow_links -- True to allow symlinks to be matched - default False
119
120 """
121
122 dvar = d.getVar('PKGD', True)
123
124 # If the root directory doesn't exist, don't error out later but silently do
125 # no splitting.
126 if not os.path.exists(dvar + root):
127 return
128
129 ml = d.getVar("MLPREFIX", True)
130 if ml:
131 if not output_pattern.startswith(ml):
132 output_pattern = ml + output_pattern
133
134 newdeps = []
135 for dep in (extra_depends or "").split():
136 if dep.startswith(ml):
137 newdeps.append(dep)
138 else:
139 newdeps.append(ml + dep)
140 if newdeps:
141 extra_depends = " ".join(newdeps)
142
143
144 packages = d.getVar('PACKAGES', True).split()
145 split_packages = []
146
147 if postinst:
148 postinst = '#!/bin/sh\n' + postinst + '\n'
149 if postrm:
150 postrm = '#!/bin/sh\n' + postrm + '\n'
151 if not recursive:
152 objs = os.listdir(dvar + root)
153 else:
154 objs = []
155 for walkroot, dirs, files in os.walk(dvar + root):
156 for file in files:
157 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
158 if relpath:
159 objs.append(relpath)
160
161 if extra_depends is None:
162 extra_depends = d.getVar("PN", True)
163
164 for o in sorted(objs):
165 import re, stat
166 if match_path:
167 m = re.match(file_regex, o)
168 else:
169 m = re.match(file_regex, os.path.basename(o))
170
171 if not m:
172 continue
173 f = os.path.join(dvar + root, o)
174 mode = os.lstat(f).st_mode
175 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
176 continue
177 on = legitimize_package_name(m.group(1))
178 pkg = output_pattern % on
179 split_packages.append(pkg)
180 if pkg not in packages:
181 if prepend:
182 packages = [pkg] + packages
183 else:
184 packages.append(pkg)
185 oldfiles = d.getVar('FILES_' + pkg, True)
186 if not oldfiles:
187 the_files = [os.path.join(root, o)]
188 if aux_files_pattern:
189 if type(aux_files_pattern) is list:
190 for fp in aux_files_pattern:
191 the_files.append(fp % on)
192 else:
193 the_files.append(aux_files_pattern % on)
194 if aux_files_pattern_verbatim:
195 if type(aux_files_pattern_verbatim) is list:
196 for fp in aux_files_pattern_verbatim:
197 the_files.append(fp % m.group(1))
198 else:
199 the_files.append(aux_files_pattern_verbatim % m.group(1))
200 d.setVar('FILES_' + pkg, " ".join(the_files))
201 if extra_depends != '':
202 d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
203 d.setVar('DESCRIPTION_' + pkg, description % on)
204 if postinst:
205 d.setVar('pkg_postinst_' + pkg, postinst)
206 if postrm:
207 d.setVar('pkg_postrm_' + pkg, postrm)
208 else:
209 d.setVar('FILES_' + pkg, oldfiles + " " + os.path.join(root, o))
210 if callable(hook):
211 hook(f, pkg, file_regex, output_pattern, m.group(1))
212
213 d.setVar('PACKAGES', ' '.join(packages))
214 return split_packages
215
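A typical caller of do_split_packages() hooks into populate_packages from a recipe; everything named 'myapp' below is hypothetical:

python populate_packages_prepend () {
    plugindir = d.expand('${libdir}/myapp/plugins')
    do_split_packages(d, plugindir, '^lib(.*)\.so$', 'myapp-plugin-%s',
                      'MyApp plugin for %s', extra_depends='')
}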
216PACKAGE_DEPENDS += "file-native"
217
218python () {
219 if d.getVar('PACKAGES', True) != '':
220 deps = ""
221 for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
222 deps += " %s:do_populate_sysroot" % dep
223 d.appendVarFlag('do_package', 'depends', deps)
224
225 # shlibs requires any DEPENDS to have already packaged for the *.list files
226 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
227
228 elif not bb.data.inherits_class('image', d):
229 d.setVar("PACKAGERDEPTASK", "")
230}
231
232def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
233 # Function to split a single file into two components, one is the stripped
234 # target system binary, the other contains any debugging information. The
235 # two files are linked to reference each other.
236 #
237 # sourcefile is also generated containing a list of debugsources
238
239 import stat
240
241 dvar = d.getVar('PKGD', True)
242 objcopy = d.getVar("OBJCOPY", True)
243 debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
244 workdir = d.getVar("WORKDIR", True)
245 workparentdir = d.getVar("DEBUGSRC_OVERRIDE_PATH", True) or os.path.dirname(os.path.dirname(workdir))
246
247 # We ignore kernel modules, we don't generate debug info files.
248 if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
249 return 1
250
251 newmode = None
252 if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
253 origmode = os.stat(file)[stat.ST_MODE]
254 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
255 os.chmod(file, newmode)
256
257 # We need to extract the debug src information here...
258 if debugsrcdir:
259 cmd = "'%s' -b '%s' -d '%s' -i -l '%s' '%s'" % (debugedit, workparentdir, debugsrcdir, sourcefile, file)
260 (retval, output) = oe.utils.getstatusoutput(cmd)
261 if retval:
262 bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
263
264 bb.utils.mkdirhier(os.path.dirname(debugfile))
265
266 cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile)
267 (retval, output) = oe.utils.getstatusoutput(cmd)
268 if retval:
269 bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
270
271 # Set the debuglink to have the view of the file path on the target
272 cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file)
273 (retval, output) = oe.utils.getstatusoutput(cmd)
274 if retval:
275 bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
276
277 if newmode:
278 os.chmod(file, origmode)
279
280 return 0
281
282def copydebugsources(debugsrcdir, d):
283 # The debug src information written out to sourcefile is further processed
284 # and copied to the destination here.
285
286 import stat
287
288 sourcefile = d.expand("${WORKDIR}/debugsources.list")
289 if debugsrcdir and os.path.isfile(sourcefile):
290 dvar = d.getVar('PKGD', True)
291 strip = d.getVar("STRIP", True)
292 objcopy = d.getVar("OBJCOPY", True)
293 debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
294 workdir = d.getVar("WORKDIR", True)
295 workparentdir = os.path.dirname(os.path.dirname(workdir))
296 workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
297
298 nosuchdir = []
299 basepath = dvar
300 for p in debugsrcdir.split("/"):
301 basepath = basepath + "/" + p
302 if not cpath.exists(basepath):
303 nosuchdir.append(basepath)
304 bb.utils.mkdirhier(basepath)
305 cpath.updatecache(basepath)
306
307 processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
308 # We need to ignore files that are not actually ours
309 # we do this by only paying attention to items from this package
310 processdebugsrc += "fgrep -zw '%s' | "
311 processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
312
313 cmd = processdebugsrc % (sourcefile, workbasedir, workparentdir, dvar, debugsrcdir)
314 (retval, output) = oe.utils.getstatusoutput(cmd)
315 # Can "fail" if internal headers/transient sources are attempted
316 #if retval:
317 # bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd))
318
319 # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
320 # Work around this by manually finding and copying any symbolic links that made it through.
321 cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
322 (retval, output) = oe.utils.getstatusoutput(cmd)
323 if retval:
324 bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd))
325
326 # The copy by cpio may have resulted in some empty directories! Remove these
327 cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
328 (retval, output) = oe.utils.getstatusoutput(cmd)
329 if retval:
330 bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
331
332 # Also remove debugsrcdir if it's empty
333 for p in nosuchdir[::-1]:
334 if os.path.exists(p) and not os.listdir(p):
335 os.rmdir(p)
336
337#
338# Package data handling routines
339#
340
341def get_package_mapping (pkg, basepkg, d):
342 import oe.packagedata
343
344 data = oe.packagedata.read_subpkgdata(pkg, d)
345 key = "PKG_%s" % pkg
346
347 if key in data:
348 # Have to avoid undoing the write_extra_pkgs(global_variants...)
349 if bb.data.inherits_class('allarch', d) and data[key] == basepkg:
350 return pkg
351 return data[key]
352
353 return pkg
354
355def runtime_mapping_rename (varname, pkg, d):
356 #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
357
358 new_depends = {}
359 deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
360 for depend in deps:
361 new_depend = get_package_mapping(depend, pkg, d)
362 new_depends[new_depend] = deps[depend]
363
364 d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
365
366 #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
367
368#
369# Package functions suitable for inclusion in PACKAGEFUNCS
370#
371
372python package_get_auto_pr() {
373 # per recipe PRSERV_HOST
374 pn = d.getVar('PN', True)
375 host = d.getVar("PRSERV_HOST_" + pn, True)
376 if not (host is None):
377 d.setVar("PRSERV_HOST", host)
378
379 if d.getVar('PRSERV_HOST', True):
380 try:
381 auto_pr=prserv_get_pr_auto(d)
382 except Exception as e:
383 bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
384 if auto_pr is None:
385 if d.getVar('PRSERV_LOCKDOWN', True):
386 bb.fatal("Can NOT get PRAUTO from lockdown exported file")
387 else:
388 bb.fatal("Can NOT get PRAUTO from remote PR service")
389 return
390 d.setVar('PRAUTO',str(auto_pr))
391 else:
392 pkgv = d.getVar("PKGV", True)
393 if 'AUTOINC' in pkgv:
394 d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
395}
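package_get_auto_pr is only active when a PR service is configured. A minimal local.conf sketch ("localhost:0" starts a local server; the per-recipe override shown is hypothetical, but uses the PRSERV_HOST_<pn> variable read by the function above):

PRSERV_HOST = "localhost:0"
# or, for a single recipe:
PRSERV_HOST_busybox = "prserv.example.com:8585"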
396
397LOCALEBASEPN ??= "${PN}"
398
399python package_do_split_locales() {
400 if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'):
401 bb.debug(1, "package requested not splitting locales")
402 return
403
404 packages = (d.getVar('PACKAGES', True) or "").split()
405
406 datadir = d.getVar('datadir', True)
407 if not datadir:
408 bb.note("datadir not defined")
409 return
410
411 dvar = d.getVar('PKGD', True)
412 pn = d.getVar('LOCALEBASEPN', True)
413
414 if pn + '-locale' in packages:
415 packages.remove(pn + '-locale')
416
417 localedir = os.path.join(dvar + datadir, 'locale')
418
419 if not cpath.isdir(localedir):
420 bb.debug(1, "No locale files in this package")
421 return
422
423 locales = os.listdir(localedir)
424
425 summary = d.getVar('SUMMARY', True) or pn
426 description = d.getVar('DESCRIPTION', True) or ""
427 locale_section = d.getVar('LOCALE_SECTION', True)
428 mlprefix = d.getVar('MLPREFIX', True) or ""
429 for l in sorted(locales):
430 ln = legitimize_package_name(l)
431 pkg = pn + '-locale-' + ln
432 packages.append(pkg)
433 d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
434 d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
435 d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
436 d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
437 d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
438 if locale_section:
439 d.setVar('SECTION_' + pkg, locale_section)
440
441 d.setVar('PACKAGES', ' '.join(packages))
442
443 # Disabled by RP 18/06/07
444 # Wildcards aren't supported in debian
445 # They break with ipkg since glibc-locale* will mean that
446 # glibc-localedata-translit* won't install as a dependency
447 # for some other package which breaks meta-toolchain
448 # Probably breaks since virtual-locale- isn't provided anywhere
449 #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split()
450 #rdep.append('%s-locale*' % pn)
451 #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
452}
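Both knobs consulted above are settable per recipe; for example:

PACKAGE_NO_LOCALE = "1"
# or route the generated locale packages into a section:
LOCALE_SECTION = "locale"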
453
454python perform_packagecopy () {
455 dest = d.getVar('D', True)
456 dvar = d.getVar('PKGD', True)
457
458 # Start package population by taking a copy of the installed
459 # files to operate on
460 # Preserve sparse files and hard links
461 cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (dest, dvar)
462 (retval, output) = oe.utils.getstatusoutput(cmd)
463 if retval:
464 bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
465
466 # replace RPATHs for the nativesdk binaries, to make them relocatable
467 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
468 rpath_replace (dvar, d)
469}
470perform_packagecopy[cleandirs] = "${PKGD}"
471perform_packagecopy[dirs] = "${PKGD}"
472
473# We generate a master list of directories to process; we start by
474# seeding this list with reasonable defaults, then load entries from
475# the fs-perms.txt files
476python fixup_perms () {
477 import pwd, grp
478
479 # init using a string with the same format as a line as documented in
480 # the fs-perms.txt file
481 # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
482 # <path> link <link target>
483 #
484 # __str__ can be used to print out an entry in the input format
485 #
486 # if fs_perms_entry.path is None:
487 # an error occurred
488 # if fs_perms_entry.link, you can retrieve:
489 # fs_perms_entry.path = path
490 # fs_perms_entry.link = target of link
491 # if not fs_perms_entry.link, you can retrieve:
492 # fs_perms_entry.path = path
493 # fs_perms_entry.mode = expected dir mode or None
494 # fs_perms_entry.uid = expected uid or -1
495 # fs_perms_entry.gid = expected gid or -1
496 # fs_perms_entry.walk = 'true' or something else
497 # fs_perms_entry.fmode = expected file mode or None
498 # fs_perms_entry.fuid = expected file uid or -1
499 # fs_perms_entry.fgid = expected file gid or -1
500 class fs_perms_entry():
501 def __init__(self, line):
502 lsplit = line.split()
503 if len(lsplit) == 3 and lsplit[1].lower() == "link":
504 self._setlink(lsplit[0], lsplit[2])
505 elif len(lsplit) == 8:
506 self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
507 else:
508 msg = "Fixup Perms: invalid config line %s" % line
509 package_qa_handle_error("perm-config", msg, d)
510 self.path = None
511 self.link = None
512
513 def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
514 self.path = os.path.normpath(path)
515 self.link = None
516 self.mode = self._procmode(mode)
517 self.uid = self._procuid(uid)
518 self.gid = self._procgid(gid)
519 self.walk = walk.lower()
520 self.fmode = self._procmode(fmode)
521 self.fuid = self._procuid(fuid)
522 self.fgid = self._procgid(fgid)
523
524 def _setlink(self, path, link):
525 self.path = os.path.normpath(path)
526 self.link = link
527
528 def _procmode(self, mode):
529 if not mode or mode == "-":
530 return None
531 else:
532 return int(mode,8)
533
534 # Note uid/gid -1 has special significance in os.lchown
535 def _procuid(self, uid):
536 if uid is None or uid == "-":
537 return -1
538 elif uid.isdigit():
539 return int(uid)
540 else:
541 return pwd.getpwnam(uid).pw_uid
542
543 def _procgid(self, gid):
544 if gid is None or gid == "-":
545 return -1
546 elif gid.isdigit():
547 return int(gid)
548 else:
549 return grp.getgrnam(gid).gr_gid
550
551 # Use for debugging the entries
552 def __str__(self):
553 if self.link:
554 return "%s link %s" % (self.path, self.link)
555 else:
556 mode = "-"
557 if self.mode:
558 mode = "0%o" % self.mode
559 fmode = "-"
560 if self.fmode:
561 fmode = "0%o" % self.fmode
562 uid = self._mapugid(self.uid)
563 gid = self._mapugid(self.gid)
564 fuid = self._mapugid(self.fuid)
565 fgid = self._mapugid(self.fgid)
566 return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
567
568 def _mapugid(self, id):
569 if id is None or id == -1:
570 return "-"
571 else:
572 return "%d" % id
573
574 # Fix the permission, owner and group of path
575 def fix_perms(path, mode, uid, gid, dir):
576 if mode and not os.path.islink(path):
577 #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
578 os.chmod(path, mode)
579 # -1 is a special value that means don't change the uid/gid
580 # if they are BOTH -1, don't bother to lchown
581 if not (uid == -1 and gid == -1):
582 #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
583 os.lchown(path, uid, gid)
584
585 # Return a list of configuration files based on either the default
586 # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
587 # paths are resolved via BBPATH
588 def get_fs_perms_list(d):
589 str = ""
590 bbpath = d.getVar('BBPATH', True)
591 fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
592 if not fs_perms_tables:
593 fs_perms_tables = 'files/fs-perms.txt'
594 for conf_file in fs_perms_tables.split():
595 str += " %s" % bb.utils.which(bbpath, conf_file)
596 return str
597
598
599
600 dvar = d.getVar('PKGD', True)
601
602 fs_perms_table = {}
603
604 # By default all of the standard directories specified in
605 # bitbake.conf will get 0755 root:root.
606 target_path_vars = [ 'base_prefix',
607 'prefix',
608 'exec_prefix',
609 'base_bindir',
610 'base_sbindir',
611 'base_libdir',
612 'datadir',
613 'sysconfdir',
614 'servicedir',
615 'sharedstatedir',
616 'localstatedir',
617 'infodir',
618 'mandir',
619 'docdir',
620 'bindir',
621 'sbindir',
622 'libexecdir',
623 'libdir',
624 'includedir',
625 'oldincludedir' ]
626
627 for path in target_path_vars:
628 dir = d.getVar(path, True) or ""
629 if dir == "":
630 continue
631 fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
632
633 # Now we actually load from the configuration files
634 for conf in get_fs_perms_list(d).split():
635 if os.path.exists(conf):
636 f = open(conf)
637 for line in f:
638 if line.startswith('#'):
639 continue
640 lsplit = line.split()
641 if len(lsplit) == 0:
642 continue
643 if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
644 msg = "Fixup perms: %s invalid line: %s" % (conf, line)
645 package_qa_handle_error("perm-line", msg, d)
646 continue
647 entry = fs_perms_entry(d.expand(line))
648 if entry and entry.path:
649 fs_perms_table[entry.path] = entry
650 f.close()
651
652 # Debug -- list out in-memory table
653 #for dir in fs_perms_table:
654 # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
655
656 # We process links first, so we can go back and fixup directory ownership
657 # for any newly created directories
658 for dir in fs_perms_table:
659 if not fs_perms_table[dir].link:
660 continue
661
662 origin = dvar + dir
663 if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
664 continue
665
666 link = fs_perms_table[dir].link
667 if link[0] == "/":
668 target = dvar + link
669 ptarget = link
670 else:
671 target = os.path.join(os.path.dirname(origin), link)
672 ptarget = os.path.join(os.path.dirname(dir), link)
673 if os.path.exists(target):
674 msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
675 package_qa_handle_error("perm-link", msg, d)
676 continue
677
678 # Create path to move directory to, move it, and then setup the symlink
679 bb.utils.mkdirhier(os.path.dirname(target))
680 #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
681 os.rename(origin, target)
682 #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
683 os.symlink(link, origin)
684
685 for dir in fs_perms_table:
686 if fs_perms_table[dir].link:
687 continue
688
689 origin = dvar + dir
690 if not (cpath.exists(origin) and cpath.isdir(origin)):
691 continue
692
693 fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
694
695 if fs_perms_table[dir].walk == 'true':
696 for root, dirs, files in os.walk(origin):
697 for dr in dirs:
698 each_dir = os.path.join(root, dr)
699 fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
700 for f in files:
701 each_file = os.path.join(root, f)
702 fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
703}
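Custom tables are added through FILESYSTEM_PERMS_TABLES and use the line format documented at the top of the function. A sketch (paths and users are hypothetical):

FILESYSTEM_PERMS_TABLES = "files/fs-perms.txt files/fs-perms-custom.txt"
# where fs-perms-custom.txt might contain lines such as:
#   /usr/share/myapp 0755 myuser myuser true 0644 myuser myuser
#   /var/run link /run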
704
705python split_and_strip_files () {
706 import stat, errno
707
708 dvar = d.getVar('PKGD', True)
709 pn = d.getVar('PN', True)
710
711 # We default to '.debug' style
712 if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
713 # Single debug-file-directory style debug info
714 debugappend = ".debug"
715 debugdir = ""
716 debuglibdir = "/usr/lib/debug"
717 debugsrcdir = "/usr/src/debug"
718 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src':
719 # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
720 debugappend = ""
721 debugdir = "/.debug"
722 debuglibdir = ""
723 debugsrcdir = ""
724 else:
725 # Original OE-core, a.k.a. ".debug", style debug info
726 debugappend = ""
727 debugdir = "/.debug"
728 debuglibdir = ""
729 debugsrcdir = "/usr/src/debug"
730
731 sourcefile = d.expand("${WORKDIR}/debugsources.list")
732 bb.utils.remove(sourcefile)
733
734 os.chdir(dvar)
735
736 # Return type (bits):
737 # 0 - not elf
738 # 1 - ELF
739 # 2 - stripped
740 # 4 - executable
741 # 8 - shared library
742 # 16 - kernel module
743 def isELF(path):
744 type = 0
745 ret, result = oe.utils.getstatusoutput("file '%s'" % path)
746
747 if ret:
748 msg = "split_and_strip_files: 'file %s' failed" % path
749 package_qa_handle_error("split-strip", msg, d)
750 return type
751
752 # Set the type bits based on the file(1) output
753 if "ELF" in result:
754 type |= 1
755 if "not stripped" not in result:
756 type |= 2
757 if "executable" in result:
758 type |= 4
759 if "shared" in result:
760 type |= 8
761 return type
762
763
764 #
765 # First lets figure out all of the files we may have to process ... do this only once!
766 #
767 elffiles = {}
768 symlinks = {}
769 hardlinks = {}
770 kernmods = []
771 libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
772 baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
773 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1') and \
774 (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
775 for root, dirs, files in cpath.walk(dvar):
776 for f in files:
777 file = os.path.join(root, f)
778 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
779 kernmods.append(file)
780 continue
781
782 # Skip debug files
783 if debugappend and file.endswith(debugappend):
784 continue
785 if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
786 continue
787
788 try:
789 ltarget = cpath.realpath(file, dvar, False)
790 s = cpath.lstat(ltarget)
791 except OSError as e:
792 (err, strerror) = e.args
793 if err != errno.ENOENT:
794 raise
795 # Skip broken symlinks
796 continue
797 if not s:
798 continue
799 # Check it's an executable
800 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
801 or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
802 # If it's a symlink, and points to an ELF file, we capture the readlink target
803 if cpath.islink(file):
804 target = os.readlink(file)
805 if isELF(ltarget):
806 #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
807 symlinks[file] = target
808 continue
809 # It's a file (or hardlink), not a link
810 # ...but is it ELF, and is it already stripped?
811 elf_file = isELF(file)
812 if elf_file & 1:
813 if elf_file & 2:
814 if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
815 bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
816 else:
817 msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
818 package_qa_handle_error("already-stripped", msg, d)
819 continue
820 # Check if it's a hard link to something else
821 if s.st_nlink > 1:
822 file_reference = "%d_%d" % (s.st_dev, s.st_ino)
823 # Hard link to something else
824 hardlinks[file] = file_reference
825 continue
826 elffiles[file] = elf_file
827
828 #
829 # First lets process debug splitting
830 #
831 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
832 hardlinkmap = {}
833 # For hardlinks, process only one of the files
834 for file in hardlinks:
835 file_reference = hardlinks[file]
836 if file_reference not in hardlinkmap:
837 # If this is a new file, add it as a reference, and
838 # update its type, so we can fall through and split
839 elffiles[file] = isELF(file)
840 hardlinkmap[file_reference] = file
841
842 for file in elffiles:
843 src = file[len(dvar):]
844 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
845 fpath = dvar + dest
846
847 # Split the file...
848 bb.utils.mkdirhier(os.path.dirname(fpath))
849 #bb.note("Split %s -> %s" % (file, fpath))
850 # Only store off the hard link reference if we successfully split!
851 splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
852
853 # Hardlink our debug symbols to the other hardlink copies
854 for file in hardlinks:
855 if file not in elffiles:
856 src = file[len(dvar):]
857 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
858 fpath = dvar + dest
859 file_reference = hardlinks[file]
860 target = hardlinkmap[file_reference][len(dvar):]
861 ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
862 bb.utils.mkdirhier(os.path.dirname(fpath))
863 #bb.note("Link %s -> %s" % (fpath, ftarget))
864 os.link(ftarget, fpath)
865
866 # Create symlinks for all cases we were able to split symbols
867 for file in symlinks:
868 src = file[len(dvar):]
869 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
870 fpath = dvar + dest
871 # Skip it if the target doesn't exist
872 try:
873 s = os.stat(fpath)
874 except OSError as e:
875 (err, strerror) = e.args
876 if err != errno.ENOENT:
877 raise
878 continue
879
880 ltarget = symlinks[file]
881 lpath = os.path.dirname(ltarget)
882 lbase = os.path.basename(ltarget)
883 ftarget = ""
884 if lpath and lpath != ".":
885 ftarget += lpath + debugdir + "/"
886 ftarget += lbase + debugappend
887 if lpath.startswith(".."):
888 ftarget = os.path.join("..", ftarget)
889 bb.utils.mkdirhier(os.path.dirname(fpath))
890 #bb.note("Symlink %s -> %s" % (fpath, ftarget))
891 os.symlink(ftarget, fpath)
892
893 # Process the debugsrcdir if requested...
894 # This copies and places the referenced sources for later debugging...
895 copydebugsources(debugsrcdir, d)
896 #
897 # End of debug splitting
898 #
899
900 #
901     # Now let's go back over things and strip them
902 #
903 if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
904 strip = d.getVar("STRIP", True)
905 sfiles = []
906 for file in elffiles:
907 elf_file = int(elffiles[file])
908 #bb.note("Strip %s" % file)
909 sfiles.append((file, elf_file, strip))
910 for f in kernmods:
911 sfiles.append((f, 16, strip))
912
913
914 import multiprocessing
915 nproc = multiprocessing.cpu_count()
916 pool = bb.utils.multiprocessingpool(nproc)
917 processed = list(pool.imap(oe.package.runstrip, sfiles))
918 pool.close()
919 pool.join()
920
921 #
922 # End of strip
923 #
924}
925
926python populate_packages () {
927 import glob, re
928
929 workdir = d.getVar('WORKDIR', True)
930 outdir = d.getVar('DEPLOY_DIR', True)
931 dvar = d.getVar('PKGD', True)
932 packages = d.getVar('PACKAGES', True)
933 pn = d.getVar('PN', True)
934
935 bb.utils.mkdirhier(outdir)
936 os.chdir(dvar)
937
938 # Sanity check PACKAGES for duplicates and for LICENSE_EXCLUSION
939     # Sanity should be moved to sanity.bbclass once we have the infrastructure
940 package_list = []
941
942 for pkg in packages.split():
943 if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
944 msg = "%s has an incompatible license. Excluding from packaging." % pkg
945 package_qa_handle_error("incompatible-license", msg, d)
946 if pkg in package_list:
947 msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
948 package_qa_handle_error("packages-list", msg, d)
949 else:
950 package_list.append(pkg)
951 d.setVar('PACKAGES', ' '.join(package_list))
952 pkgdest = d.getVar('PKGDEST', True)
953
954 seen = []
955
956 # os.mkdir masks the permissions with umask so we have to unset it first
957 oldumask = os.umask(0)
958
959 for pkg in package_list:
960 root = os.path.join(pkgdest, pkg)
961 bb.utils.mkdirhier(root)
962
963 filesvar = d.getVar('FILES_%s' % pkg, True) or ""
964 if "//" in filesvar:
965 msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
966 package_qa_handle_error("files-invalid", msg, d)
967             filesvar = filesvar.replace("//", "/")
968 files = filesvar.split()
969 for file in files:
970 if os.path.isabs(file):
971 file = '.' + file
972 if not file.startswith("./"):
973 file = './' + file
974 if not cpath.islink(file):
975 if cpath.isdir(file):
976 newfiles = [ os.path.join(file,x) for x in os.listdir(file) ]
977 if newfiles:
978 files += newfiles
979 continue
980 globbed = glob.glob(file)
981 if globbed:
982 if [ file ] != globbed:
983 files += globbed
984 continue
985 if (not cpath.islink(file)) and (not cpath.exists(file)):
986 continue
987 if file in seen:
988 continue
989 seen.append(file)
990
991 if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
992 continue
993
994 def mkdir(src, dest, p):
995 src = os.path.join(src, p)
996 dest = os.path.join(dest, p)
997 fstat = cpath.stat(src)
998 os.mkdir(dest, fstat.st_mode)
999 os.chown(dest, fstat.st_uid, fstat.st_gid)
1000 if p not in seen:
1001 seen.append(p)
1002 cpath.updatecache(dest)
1003
1004 def mkdir_recurse(src, dest, paths):
1005 if cpath.exists(dest + '/' + paths):
1006 return
1007 while paths.startswith("./"):
1008 paths = paths[2:]
1009 p = "."
1010 for c in paths.split("/"):
1011 p = os.path.join(p, c)
1012 if not cpath.exists(os.path.join(dest, p)):
1013 mkdir(src, dest, p)
1014
1015 if cpath.isdir(file) and not cpath.islink(file):
1016 mkdir_recurse(dvar, root, file)
1017 continue
1018
1019 mkdir_recurse(dvar, root, os.path.dirname(file))
1020 fpath = os.path.join(root,file)
1021 if not cpath.islink(file):
1022 os.link(file, fpath)
1023 fstat = cpath.stat(file)
1024 os.chmod(fpath, fstat.st_mode)
1025 os.chown(fpath, fstat.st_uid, fstat.st_gid)
1026 continue
1027 ret = bb.utils.copyfile(file, fpath)
1028 if ret is False or ret == 0:
1029 raise bb.build.FuncFailed("File population failed")
1030
1031 os.umask(oldumask)
1032 os.chdir(workdir)
1033
1034 unshipped = []
1035 for root, dirs, files in cpath.walk(dvar):
1036 dir = root[len(dvar):]
1037 if not dir:
1038 dir = os.sep
1039 for f in (files + dirs):
1040 path = os.path.join(dir, f)
1041 if ('.' + path) not in seen:
1042 unshipped.append(path)
1043
1044 if unshipped != []:
1045 msg = pn + ": Files/directories were installed but not shipped"
1046 if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
1047 bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
1048 else:
1049 for f in unshipped:
1050 msg = msg + "\n " + f
1051 package_qa_handle_error("installed-vs-shipped", msg, d)
1052}
1053populate_packages[dirs] = "${D}"
1054
1055python package_fixsymlinks () {
1056 import errno
1057 pkgdest = d.getVar('PKGDEST', True)
1058 packages = d.getVar("PACKAGES").split()
1059
1060 dangling_links = {}
1061 pkg_files = {}
1062 for pkg in packages:
1063 dangling_links[pkg] = []
1064 pkg_files[pkg] = []
1065 inst_root = os.path.join(pkgdest, pkg)
1066 for path in pkgfiles[pkg]:
1067 rpath = path[len(inst_root):]
1068 pkg_files[pkg].append(rpath)
1069 rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
1070 if not cpath.lexists(rtarget):
1071 dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
1072
1073 newrdepends = {}
1074 for pkg in dangling_links:
1075 for l in dangling_links[pkg]:
1076 found = False
1077 bb.debug(1, "%s contains dangling link %s" % (pkg, l))
1078 for p in packages:
1079 if l in pkg_files[p]:
1080 found = True
1081 bb.debug(1, "target found in %s" % p)
1082 if p == pkg:
1083 break
1084 if pkg not in newrdepends:
1085 newrdepends[pkg] = []
1086 newrdepends[pkg].append(p)
1087 break
1088            if not found:
1089 bb.note("%s contains dangling symlink to %s" % (pkg, l))
1090
1091 for pkg in newrdepends:
1092 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
1093 for p in newrdepends[pkg]:
1094 if p not in rdepends:
1095 rdepends[p] = []
1096 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1097}
1098
1099PKGDESTWORK = "${WORKDIR}/pkgdata"
1100
1101python emit_pkgdata() {
1102 from glob import glob
1103
1104 def write_if_exists(f, pkg, var):
1105 def encode(str):
1106 import codecs
1107 c = codecs.getencoder("string_escape")
1108 return c(str)[0]
1109
1110 val = d.getVar('%s_%s' % (var, pkg), True)
1111 if val:
1112 f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
1113 return
1114 val = d.getVar('%s' % (var), True)
1115 if val:
1116 f.write('%s: %s\n' % (var, encode(val)))
1117 return
1118
1119 def get_directory_size(dir):
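        # Size in kilobytes ('du -sk'); an empty directory counts as 0.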
1120 if os.listdir(dir):
1121 with os.popen('du -sk %s' % dir) as f:
1122 size = int(f.readlines()[0].split('\t')[0])
1123 else:
1124 size = 0
1125 return size
1126
1127 def write_extra_pkgs(variants, pn, packages, pkgdatadir):
1128 for variant in variants:
1129 with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
1130 fd.write("PACKAGES: %s\n" % ' '.join(
1131 map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
1132
1133 def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
1134 for variant in variants:
1135 for pkg in packages.split():
1136 ml_pkg = "%s-%s" % (variant, pkg)
1137 subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
1138 with open(subdata_file, 'w') as fd:
1139 fd.write("PKG_%s: %s" % (ml_pkg, pkg))
1140
1141 packages = d.getVar('PACKAGES', True)
1142 pkgdest = d.getVar('PKGDEST', True)
1143 pkgdatadir = d.getVar('PKGDESTWORK', True)
1144
1145 # Take shared lock since we're only reading, not writing
1146 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
1147
1148 data_file = pkgdatadir + d.expand("/${PN}" )
1149 f = open(data_file, 'w')
1150 f.write("PACKAGES: %s\n" % packages)
1151 f.close()
1152
1153 pn = d.getVar('PN', True)
1154 global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()
1155 variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split()
1156
1157 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1158 write_extra_pkgs(variants, pn, packages, pkgdatadir)
1159
1160 if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
1161 write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
1162
1163 workdir = d.getVar('WORKDIR', True)
1164
1165 for pkg in packages.split():
1166 items = {}
1167 for files_list in pkgfiles[pkg]:
1168 item_name = os.path.basename(files_list)
1169 item_path = os.path.dirname(files_list)
1170 if item_path not in items:
1171 items[item_path] = []
1172 items[item_path].append(item_name)
1173 subdata_file = pkgdatadir + "/runtime/%s" % pkg
1174
1175 pkgval = d.getVar('PKG_%s' % pkg, True)
1176 if pkgval is None:
1177 pkgval = pkg
1178 d.setVar('PKG_%s' % pkg, pkg)
1179
1180 d.setVar('FILES_INFO', str(items))
1181
1182 sf = open(subdata_file, 'w')
1183 write_if_exists(sf, pkg, 'PN')
1184 write_if_exists(sf, pkg, 'PV')
1185 write_if_exists(sf, pkg, 'PR')
1186 write_if_exists(sf, pkg, 'PKGV')
1187 write_if_exists(sf, pkg, 'PKGR')
1188 write_if_exists(sf, pkg, 'LICENSE')
1189 write_if_exists(sf, pkg, 'DESCRIPTION')
1190 write_if_exists(sf, pkg, 'SUMMARY')
1191 write_if_exists(sf, pkg, 'RDEPENDS')
1192 write_if_exists(sf, pkg, 'RPROVIDES')
1193 write_if_exists(sf, pkg, 'RRECOMMENDS')
1194 write_if_exists(sf, pkg, 'RSUGGESTS')
1195 write_if_exists(sf, pkg, 'RREPLACES')
1196 write_if_exists(sf, pkg, 'RCONFLICTS')
1197 write_if_exists(sf, pkg, 'SECTION')
1198 write_if_exists(sf, pkg, 'PKG')
1199 write_if_exists(sf, pkg, 'ALLOW_EMPTY')
1200 write_if_exists(sf, pkg, 'FILES')
1201 write_if_exists(sf, pkg, 'pkg_postinst')
1202 write_if_exists(sf, pkg, 'pkg_postrm')
1203 write_if_exists(sf, pkg, 'pkg_preinst')
1204 write_if_exists(sf, pkg, 'pkg_prerm')
1205 write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
1206 write_if_exists(sf, pkg, 'FILES_INFO')
1207 for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
1208 write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
1209
1210 write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
1211 for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
1212 write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
1213
1214 sf.write('%s_%s: %s\n' % ('PKGSIZE', pkg, get_directory_size(pkgdest + "/%s" % pkg)))
1215 sf.close()
1216
1217 # Symlinks needed for reverse lookups (from the final package name)
1218 subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
1219 oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
1220
1221 allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
1222 if not allow_empty:
1223 allow_empty = d.getVar('ALLOW_EMPTY', True)
1224 root = "%s/%s" % (pkgdest, pkg)
1225 os.chdir(root)
1226 g = glob('*')
1227 if g or allow_empty == "1":
1228 packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
1229 open(packagedfile, 'w').close()
1230
1231 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1232 write_extra_runtime_pkgs(variants, packages, pkgdatadir)
1233
1234 if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d):
1235 write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
1236
1237 bb.utils.unlockfile(lf)
1238}
1239emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse"
1240
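# Postinst fragment appended by package_do_shlibs (below) to packages that
# install shared libraries into the standard library directories; the "$D"
# test skips ldconfig for offline installs, e.g. during image construction.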
1241ldconfig_postinst_fragment() {
1242if [ x"$D" = "x" ]; then
1243 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
1244fi
1245}
1246
1247RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
1248
1249 # Collect per-file run-time dependency metadata
1250# Output:
1251# FILERPROVIDESFLIST_pkg - list of all files w/ deps
1252# FILERPROVIDES_filepath_pkg - per file dep
1253#
1254# FILERDEPENDSFLIST_pkg - list of all files w/ deps
1255# FILERDEPENDS_filepath_pkg - per file dep
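# For illustration (hypothetical values), a package "foo" shipping
# /usr/lib/libbar.so.1 might end up with:
#   FILERPROVIDESFLIST_foo = "/usr/lib/libbar.so.1"
#   FILERPROVIDES_/usr/lib/libbar.so.1_foo = "libbar.so.1()(64bit)"
#   FILERDEPENDS_/usr/lib/libbar.so.1_foo = "libc.so.6()(64bit)"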
1256
1257python package_do_filedeps() {
1258 if d.getVar('SKIP_FILEDEPS', True) == '1':
1259 return
1260
1261 pkgdest = d.getVar('PKGDEST', True)
1262 packages = d.getVar('PACKAGES', True)
1263 rpmdeps = d.getVar('RPMDEPS', True)
1264
1265 def chunks(files, n):
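        # Split 'files' into lists of at most n entries, e.g.
        # chunks([1,2,3,4,5], 2) -> [[1,2], [3,4], [5]].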
1266 return [files[i:i+n] for i in range(0, len(files), n)]
1267
1268 pkglist = []
1269 for pkg in packages.split():
1270 if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1':
1271 continue
1272 if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
1273 continue
1274 for files in chunks(pkgfiles[pkg], 100):
1275 pkglist.append((pkg, files, rpmdeps, pkgdest))
1276
1277 import multiprocessing
1278 nproc = multiprocessing.cpu_count()
1279 pool = bb.utils.multiprocessingpool(nproc)
1280 processed = list(pool.imap(oe.package.filedeprunner, pkglist))
1281 pool.close()
1282 pool.join()
1283
1284 provides_files = {}
1285 requires_files = {}
1286
1287 for result in processed:
1288 (pkg, provides, requires) = result
1289
1290 if pkg not in provides_files:
1291 provides_files[pkg] = []
1292 if pkg not in requires_files:
1293 requires_files[pkg] = []
1294
1295 for file in provides:
1296 provides_files[pkg].append(file)
1297 key = "FILERPROVIDES_" + file + "_" + pkg
1298 d.setVar(key, " ".join(provides[file]))
1299
1300 for file in requires:
1301 requires_files[pkg].append(file)
1302 key = "FILERDEPENDS_" + file + "_" + pkg
1303 d.setVar(key, " ".join(requires[file]))
1304
1305 for pkg in requires_files:
1306 d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
1307 for pkg in provides_files:
1308 d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
1309}
1310
1311SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs"
1312SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs"
1313
1314python package_do_shlibs() {
1315 import re, pipes
1316
1317 exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
1318 if exclude_shlibs:
1319 bb.note("not generating shlibs")
1320 return
1321
1322 lib_re = re.compile("^.*\.so")
1323 libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True))
1324
1325 packages = d.getVar('PACKAGES', True)
1326 targetos = d.getVar('TARGET_OS', True)
1327
1328 workdir = d.getVar('WORKDIR', True)
1329
1330 ver = d.getVar('PKGV', True)
1331 if not ver:
1332 msg = "PKGV not defined"
1333 package_qa_handle_error("pkgv-undefined", msg, d)
1334 return
1335
1336 pkgdest = d.getVar('PKGDEST', True)
1337
1338 shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
1339 shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
1340
1341 # Take shared lock since we're only reading, not writing
1342 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
1343
1344 def linux_so(file):
1345 needs_ldconfig = False
1346 cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
1347 fd = os.popen(cmd)
1348 lines = fd.readlines()
1349 fd.close()
1350 for l in lines:
1351 m = re.match("\s+NEEDED\s+([^\s]*)", l)
1352 if m:
1353 if m.group(1) not in needed[pkg]:
1354 needed[pkg].append(m.group(1))
1355 m = re.match("\s+SONAME\s+([^\s]*)", l)
1356 if m:
1357 this_soname = m.group(1)
1358 if not this_soname in sonames:
1359 # if library is private (only used by package) then do not build shlib for it
1360 if not private_libs or -1 == private_libs.find(this_soname):
1361 sonames.append(this_soname)
1362 if libdir_re.match(os.path.dirname(file)):
1363 needs_ldconfig = True
1364 if snap_symlinks and (os.path.basename(file) != this_soname):
1365 renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
1366 return needs_ldconfig
1367
1368 def darwin_so(file):
1369 if not os.path.exists(file):
1370 return
1371
1372 def get_combinations(base):
1373 #
1374 # Given a base library name, find all combinations of this split by "." and "-"
1375 #
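            # e.g. get_combinations("libfoo.1.2") yields
            # ["libfoo", "libfoo.1", "libfoo.1.2"] plus the "-" splits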
1376 combos = []
1377 options = base.split(".")
1378 for i in range(1, len(options) + 1):
1379 combos.append(".".join(options[0:i]))
1380 options = base.split("-")
1381 for i in range(1, len(options) + 1):
1382 combos.append("-".join(options[0:i]))
1383 return combos
1384
1385 if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'):
1386 # Drop suffix
1387 name = os.path.basename(file).rsplit(".",1)[0]
1388 # Find all combinations
1389 combos = get_combinations(name)
1390 for combo in combos:
1391 if not combo in sonames:
1392 sonames.append(combo)
1393 if file.endswith('.dylib') or file.endswith('.so'):
1394 lafile = file.replace(os.path.join(pkgdest, pkg), d.getVar('PKGD', True))
1395 # Drop suffix
1396 lafile = lafile.rsplit(".",1)[0]
1397 lapath = os.path.dirname(lafile)
1398 lafile = os.path.basename(lafile)
1399 # Find all combinations
1400 combos = get_combinations(lafile)
1401 for combo in combos:
1402 if os.path.exists(lapath + '/' + combo + '.la'):
1403 break
1404 lafile = lapath + '/' + combo + '.la'
1405
1406 #bb.note("Foo2: %s" % lafile)
1407 #bb.note("Foo %s" % file)
1408 if os.path.exists(lafile):
1409 fd = open(lafile, 'r')
1410 lines = fd.readlines()
1411 fd.close()
1412 for l in lines:
1413 m = re.match("\s*dependency_libs=\s*'(.*)'", l)
1414 if m:
1415 deps = m.group(1).split(" ")
1416 for dep in deps:
1417 #bb.note("Trying %s for %s" % (dep, pkg))
1418 name = None
1419 if dep.endswith(".la"):
1420 name = os.path.basename(dep).replace(".la", "")
1421 elif dep.startswith("-l"):
1422 name = dep.replace("-l", "lib")
1423 if pkg not in needed:
1424 needed[pkg] = []
1425 if name and name not in needed[pkg]:
1426 needed[pkg].append(name)
1427 #bb.note("Adding %s for %s" % (name, pkg))
1428
1429 if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
1430 snap_symlinks = True
1431 else:
1432 snap_symlinks = False
1433
1434 if (d.getVar('USE_LDCONFIG', True) or "1") == "1":
1435 use_ldconfig = True
1436 else:
1437 use_ldconfig = False
1438
1439 needed = {}
1440 shlib_provider = {}
1441 for pkg in packages.split():
1442 private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True)
1443 needs_ldconfig = False
1444 bb.debug(2, "calculating shlib provides for %s" % pkg)
1445
1446 pkgver = d.getVar('PKGV_' + pkg, True)
1447 if not pkgver:
1448 pkgver = d.getVar('PV_' + pkg, True)
1449 if not pkgver:
1450 pkgver = ver
1451
1452 needed[pkg] = []
1453 sonames = list()
1454 renames = list()
1455 for file in pkgfiles[pkg]:
1456 soname = None
1457 if cpath.islink(file):
1458 continue
1459 if targetos == "darwin" or targetos == "darwin8":
1460 darwin_so(file)
1461 elif os.access(file, os.X_OK) or lib_re.match(file):
1462 ldconfig = linux_so(file)
1463 needs_ldconfig = needs_ldconfig or ldconfig
1464 for (old, new) in renames:
1465 bb.note("Renaming %s to %s" % (old, new))
1466 os.rename(old, new)
1467 shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
1468 shver_file = os.path.join(shlibswork_dir, pkg + ".ver")
1469 if len(sonames):
1470 fd = open(shlibs_file, 'w')
1471 for s in sonames:
1472 fd.write(s + '\n')
1473 shlib_provider[s] = (pkg, pkgver)
1474 fd.close()
1475 fd = open(shver_file, 'w')
1476 fd.write(pkgver + '\n')
1477 fd.close()
1478 if needs_ldconfig and use_ldconfig:
1479 bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
1480 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
1481 if not postinst:
1482 postinst = '#!/bin/sh\n'
1483 postinst += d.getVar('ldconfig_postinst_fragment', True)
1484 d.setVar('pkg_postinst_%s' % pkg, postinst)
1485
1486 list_re = re.compile('^(.*)\.list$')
1487 # Go from least to most specific since the last one found wins
1488 for dir in reversed(shlibs_dirs):
1489 if not os.path.exists(dir):
1490 continue
1491 for file in os.listdir(dir):
1492 m = list_re.match(file)
1493 if m:
1494 dep_pkg = m.group(1)
1495 fd = open(os.path.join(dir, file))
1496 lines = fd.readlines()
1497 fd.close()
1498 ver_file = os.path.join(dir, dep_pkg + '.ver')
1499 lib_ver = None
1500 if os.path.exists(ver_file):
1501 fd = open(ver_file)
1502 lib_ver = fd.readline().rstrip()
1503 fd.close()
1504 for l in lines:
1505 shlib_provider[l.rstrip()] = (dep_pkg, lib_ver)
1506
1507 bb.utils.unlockfile(lf)
1508
1509 assumed_libs = d.getVar('ASSUME_SHLIBS', True)
1510 if assumed_libs:
1511 for e in assumed_libs.split():
1512 l, dep_pkg = e.split(":")
1513 lib_ver = None
1514 dep_pkg = dep_pkg.rsplit("_", 1)
1515 if len(dep_pkg) == 2:
1516 lib_ver = dep_pkg[1]
1517 dep_pkg = dep_pkg[0]
1518 shlib_provider[l] = (dep_pkg, lib_ver)
1519
1520 for pkg in packages.split():
1521 bb.debug(2, "calculating shlib requirements for %s" % pkg)
1522
1523 deps = list()
1524 for n in needed[pkg]:
1525 if n in shlib_provider.keys():
1526 (dep_pkg, ver_needed) = shlib_provider[n]
1527
1528 bb.debug(2, '%s: Dependency %s requires package %s' % (pkg, n, dep_pkg))
1529
1530 if dep_pkg == pkg:
1531 continue
1532
1533 if ver_needed:
1534 dep = "%s (>= %s)" % (dep_pkg, ver_needed)
1535 else:
1536 dep = dep_pkg
1537 if not dep in deps:
1538 deps.append(dep)
1539 else:
1540 bb.note("Couldn't find shared library provider for %s" % n)
1541
1542 deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
1543 if os.path.exists(deps_file):
1544 os.remove(deps_file)
1545 if len(deps):
1546 fd = open(deps_file, 'w')
1547 for dep in deps:
1548 fd.write(dep + '\n')
1549 fd.close()
1550}
1551
1552python package_do_pkgconfig () {
1553 import re
1554
1555 packages = d.getVar('PACKAGES', True)
1556 workdir = d.getVar('WORKDIR', True)
1557 pkgdest = d.getVar('PKGDEST', True)
1558
1559 shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
1560 shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
1561
1562 pc_re = re.compile('(.*)\.pc$')
1563 var_re = re.compile('(.*)=(.*)')
1564 field_re = re.compile('(.*): (.*)')
1565
1566 pkgconfig_provided = {}
1567 pkgconfig_needed = {}
1568 for pkg in packages.split():
1569 pkgconfig_provided[pkg] = []
1570 pkgconfig_needed[pkg] = []
1571 for file in pkgfiles[pkg]:
1572 m = pc_re.match(file)
1573 if m:
1574 pd = bb.data.init()
1575 name = m.group(1)
1576 pkgconfig_provided[pkg].append(name)
1577 if not os.access(file, os.R_OK):
1578 continue
1579 f = open(file, 'r')
1580 lines = f.readlines()
1581 f.close()
1582 for l in lines:
1583 m = var_re.match(l)
1584 if m:
1585 name = m.group(1)
1586 val = m.group(2)
1587 pd.setVar(name, pd.expand(val))
1588 continue
1589 m = field_re.match(l)
1590 if m:
1591 hdr = m.group(1)
1592 exp = bb.data.expand(m.group(2), pd)
1593 if hdr == 'Requires':
1594 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
1595
1596 # Take shared lock since we're only reading, not writing
1597 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
1598
1599 for pkg in packages.split():
1600 pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
1601 if pkgconfig_provided[pkg] != []:
1602 f = open(pkgs_file, 'w')
1603 for p in pkgconfig_provided[pkg]:
1604 f.write('%s\n' % p)
1605 f.close()
1606
1607 # Go from least to most specific since the last one found wins
1608 for dir in reversed(shlibs_dirs):
1609 if not os.path.exists(dir):
1610 continue
1611 for file in os.listdir(dir):
1612 m = re.match('^(.*)\.pclist$', file)
1613 if m:
1614 pkg = m.group(1)
1615 fd = open(os.path.join(dir, file))
1616 lines = fd.readlines()
1617 fd.close()
1618 pkgconfig_provided[pkg] = []
1619 for l in lines:
1620 pkgconfig_provided[pkg].append(l.rstrip())
1621
1622 for pkg in packages.split():
1623 deps = []
1624 for n in pkgconfig_needed[pkg]:
1625 found = False
1626 for k in pkgconfig_provided.keys():
1627 if n in pkgconfig_provided[k]:
1628 if k != pkg and not (k in deps):
1629 deps.append(k)
1630 found = True
1631            if not found:
1632 bb.note("couldn't find pkgconfig module '%s' in any package" % n)
1633 deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
1634 if len(deps):
1635 fd = open(deps_file, 'w')
1636 for dep in deps:
1637 fd.write(dep + '\n')
1638 fd.close()
1639
1640 bb.utils.unlockfile(lf)
1641}
1642
1643def read_libdep_files(d):
1644 pkglibdeps = {}
1645 packages = d.getVar('PACKAGES', True).split()
1646 for pkg in packages:
1647 pkglibdeps[pkg] = {}
1648 for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
1649 depsfile = d.expand("${PKGDEST}/" + pkg + extension)
1650 if os.access(depsfile, os.R_OK):
1651 fd = open(depsfile)
1652 lines = fd.readlines()
1653 fd.close()
1654 for l in lines:
1655                l = l.rstrip()
1656 deps = bb.utils.explode_dep_versions2(l)
1657 for dep in deps:
1658 if not dep in pkglibdeps[pkg]:
1659 pkglibdeps[pkg][dep] = deps[dep]
1660 return pkglibdeps
1661
1662python read_shlibdeps () {
1663 pkglibdeps = read_libdep_files(d)
1664
1665 packages = d.getVar('PACKAGES', True).split()
1666 for pkg in packages:
1667 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
1668 for dep in pkglibdeps[pkg]:
1669 # Add the dep if it's not already there, or if no comparison is set
1670 if dep not in rdepends:
1671 rdepends[dep] = []
1672 for v in pkglibdeps[pkg][dep]:
1673 if v not in rdepends[dep]:
1674 rdepends[dep].append(v)
1675 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1676}
1677
1678python package_depchains() {
1679 """
1680 For a given set of prefix and postfix modifiers, make those packages
1681    RRECOMMENDS on the corresponding packages for their RDEPENDS.
1682
1683 Example: If package A depends upon package B, and A's .bb emits an
1684 A-dev package, this would make A-dev Recommends: B-dev.
1685
1686 If only one of a given suffix is specified, it will take the RRECOMMENDS
1687 based on the RDEPENDS of *all* other packages. If more than one of a given
1688    suffix is specified, it will only use the RDEPENDS of the single parent
1689 package.
1690 """
1691
1692 packages = d.getVar('PACKAGES', True)
1693 postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
1694 prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
1695
1696 def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
1697
1698 #bb.note('depends for %s is %s' % (base, depends))
1699 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
1700
1701 for depend in depends:
1702 if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
1703 #bb.note("Skipping %s" % depend)
1704 continue
1705 if depend.endswith('-dev'):
1706 depend = depend[:-4]
1707 if depend.endswith('-dbg'):
1708 depend = depend[:-4]
1709 pkgname = getname(depend, suffix)
1710 #bb.note("Adding %s for %s" % (pkgname, depend))
1711 if pkgname not in rreclist and pkgname != pkg:
1712 rreclist[pkgname] = []
1713
1714 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
1715 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
1716
1717 def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
1718
1719 #bb.note('rdepends for %s is %s' % (base, rdepends))
1720 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
1721
1722 for depend in rdepends:
1723 if depend.find('virtual-locale-') != -1:
1724 #bb.note("Skipping %s" % depend)
1725 continue
1726 if depend.endswith('-dev'):
1727 depend = depend[:-4]
1728 if depend.endswith('-dbg'):
1729 depend = depend[:-4]
1730 pkgname = getname(depend, suffix)
1731 #bb.note("Adding %s for %s" % (pkgname, depend))
1732 if pkgname not in rreclist and pkgname != pkg:
1733 rreclist[pkgname] = []
1734
1735 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
1736 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
1737
1738 def add_dep(list, dep):
1739 if dep not in list:
1740 list.append(dep)
1741
1742 depends = []
1743 for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
1744 add_dep(depends, dep)
1745
1746 rdepends = []
1747 for pkg in packages.split():
1748 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
1749 add_dep(rdepends, dep)
1750
1751 #bb.note('rdepends is %s' % rdepends)
1752
1753 def post_getname(name, suffix):
1754 return '%s%s' % (name, suffix)
1755 def pre_getname(name, suffix):
1756 return '%s%s' % (suffix, name)
1757
1758 pkgs = {}
1759 for pkg in packages.split():
1760 for postfix in postfixes:
1761 if pkg.endswith(postfix):
1762 if not postfix in pkgs:
1763 pkgs[postfix] = {}
1764 pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
1765
1766 for prefix in prefixes:
1767 if pkg.startswith(prefix):
1768 if not prefix in pkgs:
1769 pkgs[prefix] = {}
1770 pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
1771
1772 if "-dbg" in pkgs:
1773 pkglibdeps = read_libdep_files(d)
1774 pkglibdeplist = []
1775 for pkg in pkglibdeps:
1776 for k in pkglibdeps[pkg]:
1777 add_dep(pkglibdeplist, k)
1778 # FIXME this should not look at PN once all task recipes inherit from task.bbclass
1779 dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (d.getVar('PN', True) or '').startswith('packagegroup-'))
1780
1781 for suffix in pkgs:
1782 for pkg in pkgs[suffix]:
1783 if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
1784 continue
1785 (base, func) = pkgs[suffix][pkg]
1786 if suffix == "-dev":
1787 pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
1788 elif suffix == "-dbg":
1789 if not dbgdefaultdeps:
1790 pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
1791 continue
1792 if len(pkgs[suffix]) == 1:
1793 pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
1794 else:
1795 rdeps = []
1796 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""):
1797 add_dep(rdeps, dep)
1798 pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
1799}
1800
1801# Since bitbake can't determine which variables are accessed during package
1802# iteration, we need to list them here:
1803PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM"
1804
1805def gen_packagevar(d):
1806 ret = []
1807 pkgs = (d.getVar("PACKAGES", True) or "").split()
1808 vars = (d.getVar("PACKAGEVARS", True) or "").split()
1809 for p in pkgs:
1810 for v in vars:
1811 ret.append(v + "_" + p)
1812
1813 # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
1814 # affected recipes.
1815 ret.append('LICENSE_EXCLUSION-%s' % p)
1816 return " ".join(ret)
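# e.g. with PACKAGES = "foo foo-dev" the above returns entries such as
# "FILES_foo RDEPENDS_foo ... FILES_foo-dev ..." plus LICENSE_EXCLUSION
# markers, all consumed by do_package[vardeps] below.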
1817
1818PACKAGE_PREPROCESS_FUNCS ?= ""
1819# Functions for setting up PKGD
1820PACKAGEBUILDPKGD ?= " \
1821 perform_packagecopy \
1822 ${PACKAGE_PREPROCESS_FUNCS} \
1823 split_and_strip_files \
1824 fixup_perms \
1825 "
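# A recipe or class can hook into this pipeline, e.g. (hypothetical):
#   PACKAGE_PREPROCESS_FUNCS += "my_pkgd_fixup"
# would run my_pkgd_fixup on PKGD after the copy, before split and strip.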
1826# Functions which split PKGD up into separate packages
1827PACKAGESPLITFUNCS ?= " \
1828 package_do_split_locales \
1829 populate_packages"
1830# Functions which process metadata based on split packages
1831PACKAGEFUNCS += " \
1832 package_fixsymlinks \
1833 package_name_hook \
1834 package_do_filedeps \
1835 package_do_shlibs \
1836 package_do_pkgconfig \
1837 read_shlibdeps \
1838 package_depchains \
1839 emit_pkgdata"
1840
1841python do_package () {
1842 # Change the following version to cause sstate to invalidate the package
1843 # cache. This is useful if an item this class depends on changes in a
1844 # way that the output of this class changes. rpmdeps is a good example
1845 # as any change to rpmdeps requires this to be rerun.
1846 # PACKAGE_BBCLASS_VERSION = "1"
1847
1848 # Init cachedpath
1849 global cpath
1850 cpath = oe.cachedpath.CachedPath()
1851
1852 ###########################################################################
1853 # Sanity test the setup
1854 ###########################################################################
1855
1856 packages = (d.getVar('PACKAGES', True) or "").split()
1857 if len(packages) < 1:
1858 bb.debug(1, "No packages to build, skipping do_package")
1859 return
1860
1861 workdir = d.getVar('WORKDIR', True)
1862 outdir = d.getVar('DEPLOY_DIR', True)
1863 dest = d.getVar('D', True)
1864 dvar = d.getVar('PKGD', True)
1865 pn = d.getVar('PN', True)
1866
1867 if not workdir or not outdir or not dest or not dvar or not pn:
1868 msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
1869 package_qa_handle_error("var-undefined", msg, d)
1870 return
1871
1872 bb.build.exec_func("package_get_auto_pr", d)
1873
1874 ###########################################################################
1875 # Optimisations
1876 ###########################################################################
1877
1878    # Continually re-expanding complex expressions is inefficient, particularly when
1879 # we write to the datastore and invalidate the expansion cache. This code
1880 # pre-expands some frequently used variables
1881
1882 def expandVar(x, d):
1883 d.setVar(x, d.getVar(x, True))
1884
1885 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
1886 expandVar(x, d)
1887
1888 ###########################################################################
1889 # Setup PKGD (from D)
1890 ###########################################################################
1891
1892 for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split():
1893 bb.build.exec_func(f, d)
1894
1895 ###########################################################################
1896 # Split up PKGD into PKGDEST
1897 ###########################################################################
1898
1899 cpath = oe.cachedpath.CachedPath()
1900
1901 for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split():
1902 bb.build.exec_func(f, d)
1903
1904 ###########################################################################
1905 # Process PKGDEST
1906 ###########################################################################
1907
1908 # Build global list of files in each split package
1909 global pkgfiles
1910 pkgfiles = {}
1911 packages = d.getVar('PACKAGES', True).split()
1912 pkgdest = d.getVar('PKGDEST', True)
1913 for pkg in packages:
1914 pkgfiles[pkg] = []
1915 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
1916 for file in files:
1917 pkgfiles[pkg].append(walkroot + os.sep + file)
1918
1919 for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
1920 bb.build.exec_func(f, d)
1921}
1922
1923do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
1924do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
1925addtask package before do_build after do_install
1926
1927PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
1928SSTATETASKS += "do_package"
1929do_package[sstate-name] = "package"
1930do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
1931do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
1932do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
1933do_package_setscene[dirs] = "${STAGING_DIR}"
1934
1935python do_package_setscene () {
1936 sstate_setscene(d)
1937}
1938addtask do_package_setscene
1939
1940do_packagedata () {
1941 :
1942}
1943
1944addtask packagedata before do_build after do_package
1945
1946SSTATETASKS += "do_packagedata"
1947do_packagedata[sstate-name] = "packagedata"
1948do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
1949do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
1950do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
1951do_packagedata[stamp-extra-info] = "${MACHINE}"
1952
1953python do_packagedata_setscene () {
1954 sstate_setscene(d)
1955}
1956addtask do_packagedata_setscene
1957
1958# Dummy task to mark when all packaging is complete
1959do_package_write () {
1960 :
1961}
1962do_package_write[noexec] = "1"
1963PACKAGERDEPTASK = "do_package_write"
1964do_build[recrdeptask] += "${PACKAGERDEPTASK}"
1965addtask package_write before do_build after do_packagedata
1966
1967#
1968# Helper functions for the package writing classes
1969#
1970
1971def mapping_rename_hook(d):
1972 """
1973 Rewrite variables to account for package renaming in things
1974 like debian.bbclass or manual PKG variable name changes
1975 """
1976 pkg = d.getVar("PKG", True)
1977 runtime_mapping_rename("RDEPENDS", pkg, d)
1978 runtime_mapping_rename("RRECOMMENDS", pkg, d)
1979 runtime_mapping_rename("RSUGGESTS", pkg, d)
1980 runtime_mapping_rename("RPROVIDES", pkg, d)
1981 runtime_mapping_rename("RREPLACES", pkg, d)
1982 runtime_mapping_rename("RCONFLICTS", pkg, d)
1983
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
new file mode 100644
index 0000000000..6a8e080138
--- /dev/null
+++ b/meta/classes/package_deb.bbclass
@@ -0,0 +1,449 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4
5inherit package
6
7IMAGE_PKGTYPE ?= "deb"
8
9DPKG_ARCH ?= "${TARGET_ARCH}"
10
11PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
12
13APTCONF_TARGET = "${WORKDIR}"
14
15APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
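# (the inline Python above indexes a two-element list with a boolean, so
# APT_ARGS gains "--no-install-recommends" only when NO_RECOMMENDATIONS is "1")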
16
17#
18# Update the Packages index files in ${DEPLOY_DIR_DEB}
19#
20package_update_index_deb () {
21
22 local debarchs=""
23
24 if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
25 return
26 fi
27
28 for arch in ${PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}; do
29 if [ -e ${DEPLOY_DIR_DEB}/$arch ]; then
30 debarchs="$debarchs $arch"
31 fi
32 done
33
34 found=0
35 for arch in $debarchs; do
36 if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
37 continue;
38 fi
39 cd ${DEPLOY_DIR_DEB}/$arch
40 dpkg-scanpackages . | gzip > Packages.gz
41 echo "Label: $arch" > Release
42 found=1
43 done
44 if [ "$found" != "1" ]; then
45 bbfatal "There are no packages in ${DEPLOY_DIR_DEB}!"
46 fi
47}
48
49#
50# install a bunch of packages using apt
51# the following shell variables need to be set before calling this func:
52# INSTALL_ROOTFS_DEB - install root dir
53# INSTALL_BASEARCH_DEB - install base architecture
54# INSTALL_ARCHS_DEB - list of available archs
55# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
56# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages to attempt to install (failures are non-fatal)
57# INSTALL_PACKAGES_LINGUAS_DEB - additional language/locale packages, installed one at a time
58# INSTALL_TASK_DEB - task name
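# A caller is expected to set these in the environment first, e.g.
# (hypothetical values):
#   INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}" INSTALL_BASEARCH_DEB="armel"
#   INSTALL_ARCHS_DEB="all armel" INSTALL_PACKAGES_NORMAL_DEB="base-files"
#   INSTALL_TASK_DEB="rootfs" package_install_internal_deb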
59
60package_install_internal_deb () {
61
62 local target_rootfs="${INSTALL_ROOTFS_DEB}"
63 local dpkg_arch="${INSTALL_BASEARCH_DEB}"
64 local archs="${INSTALL_ARCHS_DEB}"
65 local package_to_install="${INSTALL_PACKAGES_NORMAL_DEB}"
66 local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_DEB}"
67 local package_linguas="${INSTALL_PACKAGES_LINGUAS_DEB}"
68 local task="${INSTALL_TASK_DEB}"
69
70 mkdir -p ${APTCONF_TARGET}/apt
71 rm -f ${APTCONF_TARGET}/apt/sources.list.rev
72 rm -f ${APTCONF_TARGET}/apt/preferences
73
74 priority=1
75 for arch in $archs; do
76 if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
77 continue;
78 fi
79
80 echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${APTCONF_TARGET}/apt/sources.list.rev
81 (echo "Package: *"
82 echo "Pin: release l=$arch"
83 echo "Pin-Priority: $(expr 800 + $priority)"
84 echo) >> ${APTCONF_TARGET}/apt/preferences
85 priority=$(expr $priority + 5)
86 done
87
88 for pkg in ${PACKAGE_EXCLUDE}; do
89 (echo "Package: $pkg"
90 echo "Pin: release *"
91 echo "Pin-Priority: -1"
92 echo) >> ${APTCONF_TARGET}/apt/preferences
93 done
94
95 tac ${APTCONF_TARGET}/apt/sources.list.rev > ${APTCONF_TARGET}/apt/sources.list
96
97	# The fields in a deb package's control data don't allow the character `_',
98	# so change any `_' in the arch to `-'.
99 dpkg_arch=`echo ${dpkg_arch} | sed 's/_/-/g'`
100 cat "${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample" \
101 | sed -e "s#Architecture \".*\";#Architecture \"${dpkg_arch}\";#" \
102 | sed -e "s:#ROOTFS#:${target_rootfs}:g" \
103 | sed -e "s:#APTCONF#:${APTCONF_TARGET}/apt:g" \
104 > "${APTCONF_TARGET}/apt/apt.conf"
105
106 export APT_CONFIG="${APTCONF_TARGET}/apt/apt.conf"
107
108 mkdir -p ${target_rootfs}/var/lib/dpkg/info
109 mkdir -p ${target_rootfs}/var/lib/dpkg/updates
110
111 > ${target_rootfs}/var/lib/dpkg/status
112 > ${target_rootfs}/var/lib/dpkg/available
113
114 apt-get update
115
116 if [ ! -z "${package_linguas}" ]; then
117 for i in ${package_linguas}; do
118 apt-get ${APT_ARGS} install $i --force-yes --allow-unauthenticated
119 if [ $? -ne 0 ]; then
120 exit 1
121 fi
122 done
123 fi
124
125 # normal install
126 if [ ! -z "${package_to_install}" ]; then
127 apt-get ${APT_ARGS} install ${package_to_install} --force-yes --allow-unauthenticated
128 if [ $? -ne 0 ]; then
129 exit 1
130 fi
131
132 # Attempt to correct the probable broken dependencies in place.
133 apt-get ${APT_ARGS} -f install
134 if [ $? -ne 0 ]; then
135 exit 1
136 fi
137 fi
138
139 rm -f `dirname ${BB_LOGFILE}`/log.do_${task}-attemptonly.${PID}
140 if [ ! -z "${package_attemptonly}" ]; then
141 for i in ${package_attemptonly}; do
142 apt-get ${APT_ARGS} install $i --force-yes --allow-unauthenticated >> `dirname ${BB_LOGFILE}`/log.do_${task}-attemptonly.${PID} 2>&1 || true
143 done
144 fi
145
146	find ${target_rootfs} -name \*.dpkg-new | while read i; do
147		mv "$i" "`echo $i | sed -e 's,\.dpkg-new$,,'`"
148	done
149
150 # Mark all packages installed
151 sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${target_rootfs}/var/lib/dpkg/status
152}
153
154deb_log_check() {
155 target="$1"
156 lf_path="$2"
157
158 lf_txt="`cat $lf_path`"
159 for keyword_die in "^E:"
160 do
161 if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
162 then
163 echo "log_check: There were error messages in the logfile"
164 printf "log_check: Matched keyword: [$keyword_die]\n\n"
165 echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
166 echo ""
167 do_exit=1
168 fi
169 done
170 test "$do_exit" = 1 && exit 1
171 true
172}
173
174python do_package_deb () {
175 import re, copy
176 import textwrap
177 import subprocess
178
179 workdir = d.getVar('WORKDIR', True)
180 if not workdir:
181 bb.error("WORKDIR not defined, unable to package")
182 return
183
184 outdir = d.getVar('PKGWRITEDIRDEB', True)
185 if not outdir:
186 bb.error("PKGWRITEDIRDEB not defined, unable to package")
187 return
188
189 packages = d.getVar('PACKAGES', True)
190 if not packages:
191 bb.debug(1, "PACKAGES not defined, nothing to package")
192 return
193
194 tmpdir = d.getVar('TMPDIR', True)
195
196    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), os.R_OK):
197 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
198
199 if packages == []:
200 bb.debug(1, "No packages; nothing to do")
201 return
202
203 pkgdest = d.getVar('PKGDEST', True)
204
205 for pkg in packages.split():
206 localdata = bb.data.createCopy(d)
207 root = "%s/%s" % (pkgdest, pkg)
208
209 lf = bb.utils.lockfile(root + ".lock")
210
211 localdata.setVar('ROOT', '')
212 localdata.setVar('ROOT_%s' % pkg, root)
213 pkgname = localdata.getVar('PKG_%s' % pkg, True)
214 if not pkgname:
215 pkgname = pkg
216 localdata.setVar('PKG', pkgname)
217
218 localdata.setVar('OVERRIDES', pkg)
219
220 bb.data.update_data(localdata)
221        basedir = os.path.dirname(root)
222
223 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
224 bb.utils.mkdirhier(pkgoutdir)
225
226 os.chdir(root)
227 from glob import glob
228 g = glob('*')
229 try:
230 del g[g.index('DEBIAN')]
231 del g[g.index('./DEBIAN')]
232 except ValueError:
233 pass
234 if not g and localdata.getVar('ALLOW_EMPTY') != "1":
235 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
236 bb.utils.unlockfile(lf)
237 continue
238
239 controldir = os.path.join(root, 'DEBIAN')
240 bb.utils.mkdirhier(controldir)
241 os.chmod(controldir, 0755)
242 try:
243 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
244 # import codecs
245 # ctrlfile = codecs.open("someFile", "w", "utf-8")
246 except OSError:
247 bb.utils.unlockfile(lf)
248 raise bb.build.FuncFailed("unable to open control file for writing.")
249
250 fields = []
251 pe = d.getVar('PKGE', True)
252 if pe and int(pe) > 0:
253 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
254 else:
255 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
256 fields.append(["Description: %s\n", ['DESCRIPTION']])
257 fields.append(["Section: %s\n", ['SECTION']])
258 fields.append(["Priority: %s\n", ['PRIORITY']])
259 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
260 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
261 fields.append(["OE: %s\n", ['PN']])
262 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
263 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
264
265 # Package, Version, Maintainer, Description - mandatory
266 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
267
268
269 def pullData(l, d):
270 l2 = []
271 for i in l:
272 data = d.getVar(i, True)
273 if data is None:
274                    raise KeyError(i)
275 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
276 data = 'all'
277 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
278                # The fields in a deb control file don't allow the character
279                # `_', so change the arch's `_' to `-',
280                # e.g. `x86_64' --> `x86-64'
281 data = data.replace('_', '-')
282 l2.append(data)
283 return l2
284
285 ctrlfile.write("Package: %s\n" % pkgname)
286 # check for required fields
287 try:
288 for (c, fs) in fields:
289 for f in fs:
290 if localdata.getVar(f) is None:
291 raise KeyError(f)
292 # Special behavior for description...
293 if 'DESCRIPTION' in fs:
294 summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
295 ctrlfile.write('Description: %s\n' % unicode(summary))
296 description = localdata.getVar('DESCRIPTION', True) or "."
297 description = textwrap.dedent(description).strip()
298 if '\\n' in description:
299 # Manually indent
300 for t in description.split('\\n'):
301                        # We don't limit the width when manually indenting, but we do
302 # need the textwrap.fill() to set the initial_indent and
303 # subsequent_indent, so set a large width
304 ctrlfile.write('%s\n' % unicode(textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' ')))
305 else:
306 # Auto indent
307 ctrlfile.write('%s\n' % unicode(textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' ')))
308
309 else:
310 ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
311 except KeyError:
312 import sys
313 (type, value, traceback) = sys.exc_info()
314 bb.utils.unlockfile(lf)
315 ctrlfile.close()
316 raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
317 # more fields
318
319 mapping_rename_hook(localdata)
320
321 def debian_cmp_remap(var):
322 # dpkg does not allow for '(' or ')' in a dependency name
323            # replace any such instances with '__'
324 #
325            # In debian '>' and '<' do not mean what they appear to mean:
326 # '<' = less or equal
327 # '>' = greater or equal
328 # adjust these to the '<<' and '>>' equivalents
329 #
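            # e.g. a stored version constraint of "> 1.0" becomes ">> 1.0",
            # which dpkg treats as strictly greater-than.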
330 for dep in var:
331 if '(' in dep:
332 newdep = dep.replace('(', '__')
333 newdep = newdep.replace(')', '__')
334 if newdep != dep:
335 var[newdep] = var[dep]
336 del var[dep]
337 for dep in var:
338 for i, v in enumerate(var[dep]):
339 if (v or "").startswith("< "):
340 var[dep][i] = var[dep][i].replace("< ", "<< ")
341 elif (v or "").startswith("> "):
342 var[dep][i] = var[dep][i].replace("> ", ">> ")
343
344 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
345 debian_cmp_remap(rdepends)
346 for dep in rdepends:
347 if '*' in dep:
348 del rdepends[dep]
349 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
350 debian_cmp_remap(rrecommends)
351 for dep in rrecommends:
352 if '*' in dep:
353 del rrecommends[dep]
354 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
355 debian_cmp_remap(rsuggests)
356 rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "")
357 debian_cmp_remap(rprovides)
358 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
359 debian_cmp_remap(rreplaces)
360 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
361 debian_cmp_remap(rconflicts)
362 if rdepends:
363 ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
364 if rsuggests:
365 ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
366 if rrecommends:
367 ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
368 if rprovides:
369 ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
370 if rreplaces:
371 ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
372 if rconflicts:
373 ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
374 ctrlfile.close()
375
376 for script in ["preinst", "postinst", "prerm", "postrm"]:
377 scriptvar = localdata.getVar('pkg_%s' % script, True)
378 if not scriptvar:
379 continue
380 try:
381 scriptfile = open(os.path.join(controldir, script), 'w')
382 except OSError:
383 bb.utils.unlockfile(lf)
384 raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
385 scriptfile.write("#!/bin/sh\n")
386 scriptfile.write(scriptvar)
387 scriptfile.close()
388 os.chmod(os.path.join(controldir, script), 0755)
389
390 conffiles_str = localdata.getVar("CONFFILES", True)
391 if conffiles_str:
392 try:
393 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
394 except OSError:
395 bb.utils.unlockfile(lf)
396 raise bb.build.FuncFailed("unable to open conffiles for writing.")
397 for f in conffiles_str.split():
398 if os.path.exists(oe.path.join(root, f)):
399 conffiles.write('%s\n' % f)
400 conffiles.close()
401
402 os.chdir(basedir)
403 ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
404 if ret != 0:
405 bb.utils.prunedir(controldir)
406 bb.utils.unlockfile(lf)
407 raise bb.build.FuncFailed("dpkg-deb execution failed")
408
409 bb.utils.prunedir(controldir)
410 bb.utils.unlockfile(lf)
411}
412
413SSTATETASKS += "do_package_write_deb"
414do_package_write_deb[sstate-name] = "deploy-deb"
415do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
416do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
417
418python do_package_write_deb_setscene () {
419 sstate_setscene(d)
420}
421addtask do_package_write_deb_setscene
422
423python () {
424 if d.getVar('PACKAGES', True) != '':
425 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
426 d.appendVarFlag('do_package_write_deb', 'depends', deps)
427 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
428
429 # Map TARGET_ARCH to Debian's ideas about architectures
430 darch = d.getVar('DPKG_ARCH', True)
431 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
432 d.setVar('DPKG_ARCH', 'i386')
433 elif darch == "arm":
434 d.setVar('DPKG_ARCH', 'armel')
435}
436
437python do_package_write_deb () {
438 bb.build.exec_func("read_subpackage_metadata", d)
439 bb.build.exec_func("do_package_deb", d)
440}
441do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
442do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
443do_package_write_deb[umask] = "022"
444addtask package_write_deb before do_package_write after do_packagedata do_package
445
446
447PACKAGEINDEXES += "[ ! -e ${DEPLOY_DIR_DEB} ] || package_update_index_deb;"
448PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
449PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
new file mode 100644
index 0000000000..a633cfcc76
--- /dev/null
+++ b/meta/classes/package_ipk.bbclass
@@ -0,0 +1,443 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "ipk"
4
5IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
6IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
7
8PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
9
10# Program to be used to build opkg packages
11OPKGBUILDCMD ??= "opkg-build"
12
13OPKG_ARGS = "-f $INSTALL_CONF_IPK -o $INSTALL_ROOTFS_IPK --force_postinstall --prefer-arch-to-version"
14OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
15OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
16
17OPKGLIBDIR = "${localstatedir}/lib"
18
19package_tryout_install_multilib_ipk() {
20	# try installing the multilib packages
21 multilib_tryout_dirs=""
22 for item in ${MULTILIB_VARIANTS}; do
23 local target_rootfs="${MULTILIB_TEMP_ROOTFS}/${item}"
24 local ipkg_args="${OPKG_ARGS}"
25 local selected_pkg=""
26 local pkgname_prefix="${item}-"
27 local pkgname_len=${#pkgname_prefix}
28 for pkg in ${INSTALL_PACKAGES_MULTILIB_IPK}; do
29 local pkgname=$(echo $pkg | awk -v var=$pkgname_len '{ pkgname=substr($1, 1, var); print pkgname; }' )
30 if [ ${pkgname} = ${pkgname_prefix} ]; then
31 selected_pkg="${selected_pkg} ${pkg}"
32 fi
33 done
34 if [ ! -z "${selected_pkg}" ]; then
35 rm -f ${target_rootfs}
36 mkdir -p ${target_rootfs}/${opkglibdir}
37 opkg-cl ${ipkg_args} update
38 opkg-cl ${ipkg_args} install ${selected_pkg}
39 multilib_tryout_dirs="${multilib_tryout_dirs} ${target_rootfs}"
40 fi
41 done
42}
43
44split_multilib_packages() {
45 INSTALL_PACKAGES_NORMAL_IPK=""
46 INSTALL_PACKAGES_MULTILIB_IPK=""
47 for pkg in ${INSTALL_PACKAGES_IPK}; do
48 is_multilib=0
49 for item in ${MULTILIB_VARIANTS}; do
50 local pkgname_prefix="${item}-"
51 local pkgname_len=${#pkgname_prefix}
52 local pkgname=$(echo $pkg | awk -v var=$pkgname_len '{ pkgname=substr($1, 1, var); print pkgname; }' )
53 if [ ${pkgname} = ${pkgname_prefix} ]; then
54 is_multilib=1
55 break
56 fi
57 done
58
59 if [ ${is_multilib} = 0 ]; then
60 INSTALL_PACKAGES_NORMAL_IPK="${INSTALL_PACKAGES_NORMAL_IPK} ${pkg}"
61 else
62 INSTALL_PACKAGES_MULTILIB_IPK="${INSTALL_PACKAGES_MULTILIB_IPK} ${pkg}"
63 fi
64 done
65}
66
67#
68# install a bunch of packages using opkg
69# the following shell variables need to be set before calling this func:
70# INSTALL_ROOTFS_IPK - install root dir
71# INSTALL_CONF_IPK - configuration file
72# INSTALL_PACKAGES_IPK - packages to be installed
73# INSTALL_PACKAGES_ATTEMPTONLY_IPK - packages to attempt to install (failures are non-fatal)
74# INSTALL_PACKAGES_LINGUAS_IPK - additional language/locale packages, installed one at a time
75# INSTALL_TASK_IPK - task name
76
77package_install_internal_ipk() {
78
79 local target_rootfs="${INSTALL_ROOTFS_IPK}"
80 local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_IPK}"
81 local package_linguas="${INSTALL_PACKAGES_LINGUAS_IPK}"
82 local task="${INSTALL_TASK_IPK}"
83
84 split_multilib_packages
85
86 local package_to_install="${INSTALL_PACKAGES_NORMAL_IPK}"
87 local package_multilib="${INSTALL_PACKAGES_MULTILIB_IPK}"
88
89 mkdir -p ${target_rootfs}${OPKGLIBDIR}/opkg
90 touch ${target_rootfs}${OPKGLIBDIR}/opkg/status
91
92 local ipkg_args="${OPKG_ARGS}"
93
94 opkg-cl ${ipkg_args} update
95
96 for i in ${package_linguas}; do
97 opkg-cl ${ipkg_args} install $i
98 done
99
100 if [ ! -z "${package_to_install}" ]; then
101 opkg-cl ${ipkg_args} install ${package_to_install}
102 fi
103
104 if [ ! -z "${package_attemptonly}" ]; then
105 opkg-cl ${ipkg_args} install ${package_attemptonly} > "`dirname ${BB_LOGFILE}`/log.do_${task}_attemptonly.${PID}" || true
106 fi
107
108 package_tryout_install_multilib_ipk
109 if [ ! -z "${MULTILIB_CHECK_FILE}" ]; then
110 #sanity check
111 multilib_sanity_check ${target_rootfs} ${multilib_tryout_dirs} || exit 1
112 fi
113
114 if [ ! -z "${package_multilib}" ]; then
115 opkg-cl ${ipkg_args} install ${package_multilib}
116 fi
117}
118
119ipk_log_check() {
120 target="$1"
121 lf_path="$2"
122
123 lf_txt="`cat $lf_path`"
124 for keyword_die in "exit 1" "Collected errors" ERR Fail
125 do
126 if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
127 then
128 echo "log_check: There were error messages in the logfile"
129 printf "log_check: Matched keyword: [$keyword_die]\n\n"
130 echo "$lf_txt" | grep -v log_check | grep -C 5 "$keyword_die"
131 echo ""
132 do_exit=1
133 fi
134 done
135 test "$do_exit" = 1 && exit 1
136 true
137}
138
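ipk_log_check greps the log for fatal keywords while excluding its own output lines. The same scan, sketched in Python for clarity (the keyword list is copied from the function above; the log text is invented):

def log_check(log_text, keywords=("exit 1", "Collected errors", "ERR", "Fail")):
    """Return True if any error keyword appears outside log_check lines."""
    failed = False
    for line in log_text.splitlines():
        if "log_check" in line:
            continue  # skip the checker's own output, like grep -v log_check
        for kw in keywords:
            if kw in line:
                print("log_check: matched keyword [%s]: %s" % (kw, line))
                failed = True
    return failed

print(log_check("opkg_install_cmd: Collected errors:\n * not found\n"))  # True
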
139#
140# Update the Packages index files in ${DEPLOY_DIR_IPK}
141#
142package_update_index_ipk () {
143 #set -x
144
145 ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}"
146
147 if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
148 return
149 fi
150
151 packagedirs="${DEPLOY_DIR_IPK}"
152 for arch in $ipkgarchs; do
153 packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
154 done
155
156 multilib_archs="${MULTILIB_ARCHS}"
157 for arch in $multilib_archs; do
158 packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
159 done
160
161 found=0
162 for pkgdir in $packagedirs; do
163 if [ -e $pkgdir/ ]; then
164 found=1
165 touch $pkgdir/Packages
166 flock $pkgdir/Packages.flock -c "opkg-make-index -r $pkgdir/Packages -p $pkgdir/Packages -m $pkgdir/"
167 fi
168 done
169 if [ "$found" != "1" ]; then
170 bbfatal "There are no packages in ${DEPLOY_DIR_IPK}!"
171 fi
172}
173
174#
175# Generate an ipkg conf file ${IPKGCONF_TARGET} suitable for use against
176# the target system and an ipkg conf file ${IPKGCONF_SDK} suitable for
177# use against the host system in sdk builds
178#
179package_generate_ipkg_conf () {
180 package_generate_archlist
181 echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_SDK}
182 ipkgarchs="${SDK_PACKAGE_ARCHS}"
183 for arch in $ipkgarchs; do
184 if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
185 echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_SDK}
186 fi
187 done
188
189 echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_TARGET}
190 ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS}"
191 for arch in $ipkgarchs; do
192 if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
193 echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_TARGET}
194 fi
195 done
196}
197
198package_generate_archlist () {
199 ipkgarchs="${SDK_PACKAGE_ARCHS}"
200 priority=1
201 for arch in $ipkgarchs; do
202 echo "arch $arch $priority" >> ${IPKGCONF_SDK}
203 priority=$(expr $priority + 5)
204 done
205
206 ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS}"
207 priority=1
208 for arch in $ipkgarchs; do
209 echo "arch $arch $priority" >> ${IPKGCONF_TARGET}
210 priority=$(expr $priority + 5)
211 done
212}
213
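package_generate_archlist assigns each architecture an increasing opkg priority, starting at 1 and stepping by 5, so later (more specific) arches win. A small sketch of the emitted lines; the arch names here are only examples:

def archlist(archs, step=5):
    """Yield 'arch <name> <priority>' lines, lowest priority first."""
    priority = 1
    for arch in archs:
        yield "arch %s %d" % (arch, priority)
        priority += step

for line in archlist(["all", "armv5te", "qemuarm"]):
    print(line)
# arch all 1
# arch armv5te 6
# arch qemuarm 11
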
214python do_package_ipk () {
215 import re, copy
216 import textwrap
217 import subprocess
218
219 workdir = d.getVar('WORKDIR', True)
220 outdir = d.getVar('PKGWRITEDIRIPK', True)
221 tmpdir = d.getVar('TMPDIR', True)
222 pkgdest = d.getVar('PKGDEST', True)
223 if not workdir or not outdir or not tmpdir:
224 bb.error("Variables incorrectly set, unable to package")
225 return
226
227 packages = d.getVar('PACKAGES', True)
228 if not packages or packages == '':
229 bb.debug(1, "No packages; nothing to do")
230 return
231
232    # We're about to add new packages, so the index needs to be rebuilt;
233    # remove the stamp file that marks it as clean.
234 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
235 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
236
237 for pkg in packages.split():
238 localdata = bb.data.createCopy(d)
239 root = "%s/%s" % (pkgdest, pkg)
240
241 lf = bb.utils.lockfile(root + ".lock")
242
243 localdata.setVar('ROOT', '')
244 localdata.setVar('ROOT_%s' % pkg, root)
245 pkgname = localdata.getVar('PKG_%s' % pkg, True)
246 if not pkgname:
247 pkgname = pkg
248 localdata.setVar('PKG', pkgname)
249
250 localdata.setVar('OVERRIDES', pkg)
251
252 bb.data.update_data(localdata)
253 basedir = os.path.join(os.path.dirname(root))
254 arch = localdata.getVar('PACKAGE_ARCH', True)
255 pkgoutdir = "%s/%s" % (outdir, arch)
256 bb.utils.mkdirhier(pkgoutdir)
257 os.chdir(root)
258 from glob import glob
259 g = glob('*')
260 try:
261 del g[g.index('CONTROL')]
262 del g[g.index('./CONTROL')]
263 except ValueError:
264 pass
265 if not g and localdata.getVar('ALLOW_EMPTY') != "1":
266 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
267 bb.utils.unlockfile(lf)
268 continue
269
270 controldir = os.path.join(root, 'CONTROL')
271 bb.utils.mkdirhier(controldir)
272 try:
273 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
274 except OSError:
275 bb.utils.unlockfile(lf)
276 raise bb.build.FuncFailed("unable to open control file for writing.")
277
278 fields = []
279 pe = d.getVar('PKGE', True)
280 if pe and int(pe) > 0:
281 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
282 else:
283 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
284 fields.append(["Description: %s\n", ['DESCRIPTION']])
285 fields.append(["Section: %s\n", ['SECTION']])
286 fields.append(["Priority: %s\n", ['PRIORITY']])
287 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
288 fields.append(["License: %s\n", ['LICENSE']])
289 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
290 fields.append(["OE: %s\n", ['PN']])
291 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
292
293 def pullData(l, d):
294 l2 = []
295 for i in l:
296 l2.append(d.getVar(i, True))
297 return l2
298
299 ctrlfile.write("Package: %s\n" % pkgname)
300 # check for required fields
301 try:
302 for (c, fs) in fields:
303 for f in fs:
304 if localdata.getVar(f) is None:
305 raise KeyError(f)
306 # Special behavior for description...
307 if 'DESCRIPTION' in fs:
308 summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
309 ctrlfile.write('Description: %s\n' % summary)
310 description = localdata.getVar('DESCRIPTION', True) or "."
311 description = textwrap.dedent(description).strip()
312 if '\\n' in description:
313 # Manually indent
314 for t in description.split('\\n'):
315                        # We don't limit the width when manually indenting, but we do
316                        # need textwrap.fill() to set the initial_indent and
317                        # subsequent_indent, so use a large width
318 ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' '))
319 else:
320 # Auto indent
321 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
322 else:
323 ctrlfile.write(c % tuple(pullData(fs, localdata)))
324 except KeyError:
325 import sys
326 (type, value, traceback) = sys.exc_info()
327 ctrlfile.close()
328 bb.utils.unlockfile(lf)
329 raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
330 # more fields
331
332 mapping_rename_hook(localdata)
333
334 def debian_cmp_remap(var):
335        # In Debian, '>' and '<' do not mean what they appear to mean:
336 # '<' = less or equal
337 # '>' = greater or equal
338 # adjust these to the '<<' and '>>' equivalents
339 #
340 for dep in var:
341 for i, v in enumerate(var[dep]):
342 if (v or "").startswith("< "):
343 var[dep][i] = var[dep][i].replace("< ", "<< ")
344 elif (v or "").startswith("> "):
345 var[dep][i] = var[dep][i].replace("> ", ">> ")
346
347 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
348 debian_cmp_remap(rdepends)
349 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
350 debian_cmp_remap(rrecommends)
351 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
352 debian_cmp_remap(rsuggests)
353 rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "")
354 debian_cmp_remap(rprovides)
355 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
356 debian_cmp_remap(rreplaces)
357 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
358 debian_cmp_remap(rconflicts)
359
360 if rdepends:
361 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
362 if rsuggests:
363 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
364 if rrecommends:
365 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
366 if rprovides:
367 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
368 if rreplaces:
369 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
370 if rconflicts:
371 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
372 src_uri = localdata.getVar("SRC_URI", True) or "None"
373 if src_uri:
374 src_uri = re.sub("\s+", " ", src_uri)
375 ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
376 ctrlfile.close()
377
378 for script in ["preinst", "postinst", "prerm", "postrm"]:
379 scriptvar = localdata.getVar('pkg_%s' % script, True)
380 if not scriptvar:
381 continue
382 try:
383 scriptfile = open(os.path.join(controldir, script), 'w')
384 except OSError:
385 bb.utils.unlockfile(lf)
386 raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
387 scriptfile.write(scriptvar)
388 scriptfile.close()
389 os.chmod(os.path.join(controldir, script), 0755)
390
391 conffiles_str = localdata.getVar("CONFFILES", True)
392 if conffiles_str:
393 try:
394 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
395 except OSError:
396 bb.utils.unlockfile(lf)
397 raise bb.build.FuncFailed("unable to open conffiles for writing.")
398 for f in conffiles_str.split():
399 if os.path.exists(oe.path.join(root, f)):
400 conffiles.write('%s\n' % f)
401 conffiles.close()
402
403 os.chdir(basedir)
404 ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
405 d.getVar("OPKGBUILDCMD",1), pkg, pkgoutdir), shell=True)
406 if ret != 0:
407 bb.utils.unlockfile(lf)
408 raise bb.build.FuncFailed("opkg-build execution failed")
409
410 bb.utils.prunedir(controldir)
411 bb.utils.unlockfile(lf)
412
413}
414
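Since debian_cmp_remap above is nested inside do_package_ipk, here is the same remapping as a standalone sketch, with a worked example on the dict shape produced by bb.utils.explode_dep_versions2 (the package names are hypothetical):

def debian_cmp_remap(var):
    """Rewrite '< ' / '> ' version constraints to '<< ' / '>> '."""
    for dep in var:
        for i, v in enumerate(var[dep]):
            if (v or "").startswith("< "):
                var[dep][i] = v.replace("< ", "<< ")
            elif (v or "").startswith("> "):
                var[dep][i] = v.replace("> ", ">> ")

deps = {"libfoo": ["> 1.0"], "libbar": ["< 2.0"], "libbaz": [">= 3.0"]}
debian_cmp_remap(deps)
print(deps)  # {'libfoo': ['>> 1.0'], 'libbar': ['<< 2.0'], 'libbaz': ['>= 3.0']}
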
415SSTATETASKS += "do_package_write_ipk"
416do_package_write_ipk[sstate-name] = "deploy-ipk"
417do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
418do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
419
420python do_package_write_ipk_setscene () {
421 sstate_setscene(d)
422}
423addtask do_package_write_ipk_setscene
424
425python () {
426 if d.getVar('PACKAGES', True) != '':
427 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
428 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
429 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
430}
431
432python do_package_write_ipk () {
433 bb.build.exec_func("read_subpackage_metadata", d)
434 bb.build.exec_func("do_package_ipk", d)
435}
436do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
437do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
438do_package_write_ipk[umask] = "022"
439addtask package_write_ipk before do_package_write after do_packagedata do_package
440
441PACKAGEINDEXES += "[ ! -e ${DEPLOY_DIR_IPK} ] || package_update_index_ipk;"
442PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
443PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
new file mode 100644
index 0000000000..36bad09ea1
--- /dev/null
+++ b/meta/classes/package_rpm.bbclass
@@ -0,0 +1,1201 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "rpm"
4
5RPM="rpm"
6RPMBUILD="rpmbuild"
7
8PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
9PKGWRITEDIRSRPM = "${DEPLOY_DIR}/sources/deploy-srpm"
10
11# Maintaining the per-file dependencies has significant overhead when writing the
12# packages. When set, this value merges them for efficiency.
13MERGEPERFILEDEPS = "1"
14
15#
16# Update the package indexes in ${DEPLOY_DIR_RPM}
17#
18package_update_index_rpm () {
19 if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
20 return
21 fi
22
23 sdk_archs=`echo "${SDK_PACKAGE_ARCHS}" | tr - _`
24
25 target_archs=""
26 for i in ${MULTILIB_PREFIX_LIST} ; do
27 old_IFS="$IFS"
28 IFS=":"
29 set $i
30 IFS="$old_IFS"
31 shift # remove mlib
32 while [ -n "$1" ]; do
33 target_archs="$target_archs $1"
34 shift
35 done
36 done
37
38 # FIXME stopgap for broken "bitbake package-index" since MULTILIB_PREFIX_LIST isn't set for that
39 if [ "$target_archs" = "" ] ; then
40 target_archs="${ALL_MULTILIB_PACKAGE_ARCHS}"
41 fi
42
43 target_archs=`echo "$target_archs" | tr - _`
44
45 archs=`for arch in $target_archs $sdk_archs ; do
46 echo $arch
47 done | sort | uniq`
48
49 found=0
50 for arch in $archs; do
51 if [ -d ${DEPLOY_DIR_RPM}/$arch ] ; then
52 createrepo --update -q ${DEPLOY_DIR_RPM}/$arch
53 found=1
54 fi
55 done
56 if [ "$found" != "1" ]; then
57 bbfatal "There are no packages in ${DEPLOY_DIR_RPM}!"
58 fi
59}
60
61rpm_log_check() {
62 target="$1"
63 lf_path="$2"
64
65 lf_txt="`cat $lf_path`"
66 for keyword_die in "unpacking of archive failed" "Cannot find package" "exit 1" ERR Fail
67 do
68 if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
69 then
70 echo "log_check: There were error messages in the logfile"
71 printf "log_check: Matched keyword: [$keyword_die]\n\n"
72 echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
73 echo ""
74 do_exit=1
75 fi
76 done
77 test "$do_exit" = 1 && exit 1
78 true
79}
80
81# Translate the RPM/Smart format names to the OE multilib format names
82# Input via stdin (only the first item per line is converted!)
83# Output via stdout
84translate_smart_to_oe() {
85 arg1="$1"
86
87 # Dump installed packages
88 while read pkg arch other ; do
89 found=0
90 if [ -z "$pkg" ]; then
91 continue
92 fi
93 new_pkg=$pkg
94 fixed_arch=`echo "$arch" | tr _ -`
95 for i in ${MULTILIB_PREFIX_LIST} ; do
96 old_IFS="$IFS"
97 IFS=":"
98 set $i
99 IFS="$old_IFS"
100 mlib="$1"
101 shift
102 while [ -n "$1" ]; do
103 cmp_arch=$1
104 shift
105 fixed_cmp_arch=`echo "$cmp_arch" | tr _ -`
106 if [ "$fixed_arch" = "$fixed_cmp_arch" ]; then
107 if [ "$mlib" = "default" ]; then
108 new_pkg="$pkg"
109 new_arch=$cmp_arch
110 else
111 new_pkg="$mlib-$pkg"
112 # We need to strip off the ${mlib}_ prefix on the arch
113 new_arch=${cmp_arch#${mlib}_}
114 fi
115 # Workaround for bug 3565
116 # Simply look to see if we know of a package with that name, if not try again!
117 filename=`ls ${PKGDATA_DIR}/runtime-reverse/$new_pkg 2>/dev/null | head -n 1`
118 if [ -n "$filename" ] ; then
119 found=1
120 break
121 fi
122 # 'real' code
123 # found=1
124 # break
125 fi
126 done
127 if [ "$found" = "1" ] && [ "$fixed_arch" = "$fixed_cmp_arch" ]; then
128 break
129 fi
130 done
131
132 #echo "$pkg -> $new_pkg" >&2
133 if [ "$arg1" = "arch" ]; then
134 echo $new_pkg $new_arch $other
135 elif [ "$arg1" = "file" ]; then
136 echo $new_pkg $other $new_arch
137 else
138 echo $new_pkg $other
139 fi
140 done
141}
142
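A condensed Python sketch of the name/arch mapping translate_smart_to_oe performs; it omits the PKGDATA_DIR existence workaround for bug 3565, and the prefix-list shape and names are illustrative:

def translate_smart_to_oe(pkg, arch, prefix_list):
    """Map an RPM/Smart package+arch pair back to OE multilib naming."""
    fixed = arch.replace("_", "-")
    for mlib, archs in prefix_list.items():
        for cmp_arch in archs:
            if fixed != cmp_arch.replace("_", "-"):
                continue
            if mlib == "default":
                return pkg, cmp_arch
            # Prefix the name, strip the "<mlib>_" prefix from the arch
            return "%s-%s" % (mlib, pkg), cmp_arch[len(mlib) + 1:]
    return pkg, arch

print(translate_smart_to_oe("glibc", "lib32_x86",
                            {"default": ["core2_64"], "lib32": ["lib32_x86"]}))
# ('lib32-glibc', 'x86')
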
143# Translate the OE multilib format names to the RPM/Smart format names
144# Input via arguments
145# Output via pkgs_to_install
146translate_oe_to_smart() {
147 default_archs=""
148 sdk_mode=""
149 if [ "$1" = "--sdk" ]; then
150 shift
151 sdk_mode="true"
152		# Reverse SDK_PACKAGE_ARCHS so they are ordered highest -> lowest priority
153 archs=`echo "${SDK_PACKAGE_ARCHS}" | tr - _`
154 for arch in $archs ; do
155 default_archs="$arch $default_archs"
156 done
157 fi
158
159 attemptonly="Error"
160 if [ "$1" = "--attemptonly" ]; then
161 attemptonly="Warning"
162 shift
163 fi
164
165 # Dump a list of all available packages
166 [ ! -e ${target_rootfs}/install/tmp/fullpkglist.query ] && smart --data-dir=${target_rootfs}/var/lib/smart query --output ${target_rootfs}/install/tmp/fullpkglist.query
167
168 pkgs_to_install=""
169 for pkg in "$@" ; do
170 new_pkg="$pkg"
171 if [ -z "$sdk_mode" ]; then
172 for i in ${MULTILIB_PREFIX_LIST} ; do
173 old_IFS="$IFS"
174 IFS=":"
175 set $i
176 IFS="$old_IFS"
177 mlib="$1"
178 shift
179 if [ "$mlib" = "default" ]; then
180 if [ -z "$default_archs" ]; then
181 default_archs=$@
182 fi
183 continue
184 fi
185 subst=${pkg#${mlib}-}
186 if [ "$subst" != "$pkg" ]; then
187 feeds=$@
188 while [ -n "$1" ]; do
189 arch="$1"
190 arch=`echo "$arch" | tr - _`
191 shift
192 if grep -q '^'$subst'-[^-]*-[^-]*@'$arch'$' ${target_rootfs}/install/tmp/fullpkglist.query ; then
193 new_pkg="$subst@$arch"
194 # First found is best match
195 break
196 fi
197 done
198 if [ "$pkg" = "$new_pkg" ]; then
199 # Failed to translate, package not found!
200 echo "$attemptonly: $pkg not found in the $mlib feeds ($feeds)." >&2
201 if [ "$attemptonly" = "Error" ]; then
202 exit 1
203 fi
204 continue
205 fi
206 fi
207 done
208 fi
209 # Apparently not a multilib package...
210 if [ "$pkg" = "$new_pkg" ]; then
211 default_archs_fixed=`echo "$default_archs" | tr - _`
212 for arch in $default_archs_fixed ; do
213 if grep -q '^'$pkg'-[^-]*-[^-]*@'$arch'$' ${target_rootfs}/install/tmp/fullpkglist.query ; then
214 new_pkg="$pkg@$arch"
215 # First found is best match
216 break
217 fi
218 done
219 if [ "$pkg" = "$new_pkg" ]; then
220 # Failed to translate, package not found!
221 echo "$attemptonly: $pkg not found in the base feeds ($default_archs)." >&2
222 if [ "$attemptonly" = "Error" ]; then
223 exit 1
224 fi
225 continue
226 fi
227 fi
228 #echo "$pkg -> $new_pkg" >&2
229 pkgs_to_install="${pkgs_to_install} ${new_pkg}"
230 done
231 export pkgs_to_install
232}
233
234package_write_smart_config() {
235 # Write common configuration for host and target usage
236 smart --data-dir=$1/var/lib/smart config --set rpm-nolinktos=1
237 smart --data-dir=$1/var/lib/smart config --set rpm-noparentdirs=1
238 for i in ${BAD_RECOMMENDATIONS}; do
239 smart --data-dir=$1/var/lib/smart flag --set ignore-recommends $i
240 done
241}
242
243#
244# Install a bunch of packages using rpm.
245# There are two install solutions during an image's fresh (from-scratch) generation:
246# 1) main package solution
247# 2) complementary solution
248#
249# It is different when incremental image generation is enabled:
250# 1) The incremental image generation takes action during the main package
251#    installation; the previously installed complementary packages are
252#    usually removed here, and the new complementary ones are
253#    installed in the next step.
254# 2) The complementary packages are always installed, since their list is
255#    generated from the image produced by the first step.
256#
257# The following shell variables need to be set before calling this function:
258# INSTALL_ROOTFS_RPM - install root dir
259# INSTALL_PLATFORM_RPM - main platform
260# INSTALL_PLATFORM_EXTRA_RPM - extra platform
261# INSTALL_PACKAGES_RPM - packages to be installed
262# INSTALL_PACKAGES_ATTEMPTONLY_RPM - packages whose installation is only attempted
263# INSTALL_PACKAGES_LINGUAS_RPM - additional language (locale) packages
264# INSTALL_PROVIDENAME_RPM - content for provide name
265# INSTALL_TASK_RPM - task name
266# INSTALL_COMPLEMENTARY_RPM - 1 to enable complementary package install mode
267
268package_install_internal_rpm () {
269
270 local target_rootfs="$INSTALL_ROOTFS_RPM"
271 local package_to_install="$INSTALL_PACKAGES_RPM"
272 local package_attemptonly="$INSTALL_PACKAGES_ATTEMPTONLY_RPM"
273 local package_linguas="$INSTALL_PACKAGES_LINGUAS_RPM"
274 local providename="$INSTALL_PROVIDENAME_RPM"
275 local task="$INSTALL_TASK_RPM"
276
277 local sdk_mode=""
278 if [ "$1" = "--sdk" ]; then
279 sdk_mode="--sdk"
280 fi
281
282 # Configure internal RPM environment when using Smart
283 export RPM_ETCRPM=${target_rootfs}/etc/rpm
284
285 # Setup temporary directory -- install...
286 rm -rf ${target_rootfs}/install
287 mkdir -p ${target_rootfs}/install/tmp
288
289 channel_priority=5
290 if [ "${INSTALL_COMPLEMENTARY_RPM}" != "1" ] ; then
291 # Setup base system configuration
292 echo "Note: configuring RPM platform settings"
293 mkdir -p ${target_rootfs}/etc/rpm/
294 echo "$INSTALL_PLATFORM_RPM" > ${target_rootfs}/etc/rpm/platform
295
296 if [ ! -z "$INSTALL_PLATFORM_EXTRA_RPM" ]; then
297 for pt in $INSTALL_PLATFORM_EXTRA_RPM ; do
298 channel_priority=$(expr $channel_priority + 5)
299 case $pt in
300 noarch-* | any-* | all-*)
301 pt=$(echo $pt | sed "s,-linux.*$,-linux\.*,")
302 ;;
303 esac
304 echo "$pt" >> ${target_rootfs}/etc/rpm/platform
305 done
306 fi
307
308		# Tell RPM that the "/" directory exists and is available
309 echo "Note: configuring RPM system provides"
310 mkdir -p ${target_rootfs}/etc/rpm/sysinfo
311 echo "/" >${target_rootfs}/etc/rpm/sysinfo/Dirnames
312
313 if [ ! -z "$providename" ]; then
314 cat /dev/null > ${target_rootfs}/etc/rpm/sysinfo/Providename
315 for provide in $providename ; do
316 echo $provide >> ${target_rootfs}/etc/rpm/sysinfo/Providename
317 done
318 fi
319
320 # Configure RPM... we enforce these settings!
321 echo "Note: configuring RPM DB settings"
322 mkdir -p ${target_rootfs}${rpmlibdir}
323 mkdir -p ${target_rootfs}${rpmlibdir}/log
324		# After changing the __db.* cache size, the log file will not be generated
325		# automatically, which raises warnings, so touch an empty log for rpm to write into.
326 touch ${target_rootfs}${rpmlibdir}/log/log.0000000001
327 if [ ! -e ${target_rootfs}${rpmlibdir}/DB_CONFIG ]; then
328 cat > ${target_rootfs}${rpmlibdir}/DB_CONFIG << EOF
329# ================ Environment
330set_data_dir .
331set_create_dir .
332set_lg_dir ./log
333set_tmp_dir ./tmp
334set_flags db_log_autoremove on
335
336# -- thread_count must be >= 8
337set_thread_count 64
338
339# ================ Logging
340
341# ================ Memory Pool
342set_cachesize 0 1048576 0
343set_mp_mmapsize 268435456
344
345# ================ Locking
346set_lk_max_locks 16384
347set_lk_max_lockers 16384
348set_lk_max_objects 16384
349 mutex_set_max 163840
350
351# ================ Replication
352EOF
353 fi
354
355 # Create database so that smart doesn't complain (lazy init)
356 rpm --root $target_rootfs --dbpath /var/lib/rpm -qa > /dev/null
357
358 # Configure smart
359 echo "Note: configuring Smart settings"
360 rm -rf ${target_rootfs}/var/lib/smart
361 smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-root=${target_rootfs}
362 smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-dbpath=${rpmlibdir}
363 smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-extra-macros._var=${localstatedir}
364 smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-extra-macros._tmppath=/install/tmp
365 package_write_smart_config ${target_rootfs}
366		# Apply the following configuration here to avoid it being saved for field upgrades
367 if [ "x${NO_RECOMMENDATIONS}" = "x1" ]; then
368 smart --data-dir=${target_rootfs}/var/lib/smart config --set ignore-all-recommends=1
369 fi
370 for i in ${PACKAGE_EXCLUDE}; do
371 smart --data-dir=${target_rootfs}/var/lib/smart flag --set exclude-packages $i
372 done
373
374 # Optional debugging
375 #smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-log-level=debug
376 #smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-log-file=/tmp/smart-debug-logfile
377
378 # Delay this until later...
379 #smart --data-dir=${target_rootfs}/var/lib/smart channel --add rpmsys type=rpm-sys -y
380
381 for canonical_arch in $INSTALL_PLATFORM_EXTRA_RPM; do
382 arch=$(echo $canonical_arch | sed "s,\([^-]*\)-.*,\1,")
383 if [ -d ${DEPLOY_DIR_RPM}/$arch -a ! -e ${target_rootfs}/install/channel.$arch.stamp ] ; then
384 echo "Note: adding Smart channel $arch ($channel_priority)"
385				smart --data-dir=${target_rootfs}/var/lib/smart channel --add $arch type=rpm-md baseurl=${DEPLOY_DIR_RPM}/$arch -y
386 smart --data-dir=${target_rootfs}/var/lib/smart channel --set $arch priority=$channel_priority
387 touch ${target_rootfs}/install/channel.$arch.stamp
388 fi
389 channel_priority=$(expr $channel_priority - 5)
390 done
391 fi
392
393 # Construct install scriptlet wrapper
394	# Scripts need to run in order, and numeric naming ensures that order.
395	# If we ever need more than 899 scripts, we'll have to
396	# change num to start at 1000.
397 #
398 cat << EOF > ${WORKDIR}/scriptlet_wrapper
399#!/bin/bash
400
401export PATH="${PATH}"
402export D="${target_rootfs}"
403export OFFLINE_ROOT="\$D"
404export IPKG_OFFLINE_ROOT="\$D"
405export OPKG_OFFLINE_ROOT="\$D"
406export INTERCEPT_DIR="${WORKDIR}/intercept_scripts"
407export NATIVE_ROOT=${STAGING_DIR_NATIVE}
408
409\$2 \$1/\$3 \$4
410if [ \$? -ne 0 ]; then
411 if [ \$4 -eq 1 ]; then
412 mkdir -p \$1/etc/rpm-postinsts
413 num=100
414 while [ -e \$1/etc/rpm-postinsts/\${num}-* ]; do num=\$((num + 1)); done
415 name=\`head -1 \$1/\$3 | cut -d' ' -f 2\`
416 echo "#!\$2" > \$1/etc/rpm-postinsts/\${num}-\${name}
417 echo "# Arg: \$4" >> \$1/etc/rpm-postinsts/\${num}-\${name}
418 cat \$1/\$3 >> \$1/etc/rpm-postinsts/\${num}-\${name}
419 chmod +x \$1/etc/rpm-postinsts/\${num}-\${name}
420 else
421 echo "Error: pre/post remove scriptlet failed"
422 fi
423fi
424EOF
425
426 echo "Note: configuring RPM cross-install scriptlet_wrapper"
427 chmod 0755 ${WORKDIR}/scriptlet_wrapper
428 smart --data-dir=${target_rootfs}/var/lib/smart config --set rpm-extra-macros._cross_scriptlet_wrapper=${WORKDIR}/scriptlet_wrapper
429
430 # Determine what to install
431 translate_oe_to_smart ${sdk_mode} ${package_to_install} ${package_linguas}
432
433	# For an incremental install, dump the new solution now so we can
434	# later determine what to add and what to remove...
435 if [ "${INC_RPM_IMAGE_GEN}" = "1" -a "${INSTALL_COMPLEMENTARY_RPM}" != "1" ]; then
436 # Dump the new solution
437 echo "Note: creating install solution for incremental install"
438 smart --data-dir=${target_rootfs}/var/lib/smart install -y --dump ${pkgs_to_install} 2> ${target_rootfs}/../solution.manifest
439 fi
440
441 if [ "${INSTALL_COMPLEMENTARY_RPM}" != "1" ]; then
442 echo "Note: adding Smart RPM DB channel"
443 smart --data-dir=${target_rootfs}/var/lib/smart channel --add rpmsys type=rpm-sys -y
444 fi
445
446 # If incremental install, we need to determine what we've got,
447 # what we need to add, and what to remove...
448 if [ "${INC_RPM_IMAGE_GEN}" = "1" -a "${INSTALL_COMPLEMENTARY_RPM}" != "1" ]; then
449 # First upgrade everything that was previously installed to the latest version
450 echo "Note: incremental update -- upgrade packages in place"
451 smart --data-dir=${target_rootfs}/var/lib/smart upgrade
452
453 # Dump what is already installed
454 echo "Note: dump installed packages for incremental update"
455 smart --data-dir=${target_rootfs}/var/lib/smart query --installed --output ${target_rootfs}/../installed.manifest
456
457 sort ${target_rootfs}/../installed.manifest > ${target_rootfs}/../installed.manifest.sorted
458 sort ${target_rootfs}/../solution.manifest > ${target_rootfs}/../solution.manifest.sorted
459
460 comm -1 -3 ${target_rootfs}/../solution.manifest.sorted ${target_rootfs}/../installed.manifest.sorted \
461 > ${target_rootfs}/../remove.list
462 comm -2 -3 ${target_rootfs}/../solution.manifest.sorted ${target_rootfs}/../installed.manifest.sorted \
463 > ${target_rootfs}/../install.list
464
465 pkgs_to_remove=`cat ${target_rootfs}/../remove.list | xargs echo`
466 pkgs_to_install=`cat ${target_rootfs}/../install.list | xargs echo`
467
468 echo "Note: to be removed: ${pkgs_to_remove}"
469
470 for pkg in ${pkgs_to_remove}; do
471 echo "Debug: What required: $pkg"
472 smart --data-dir=${target_rootfs}/var/lib/smart query $pkg --show-requiredby
473 done
474
475 [ -n "$pkgs_to_remove" ] && smart --data-dir=${target_rootfs}/var/lib/smart remove -y ${pkgs_to_remove}
476 fi
477
478 echo "Note: to be installed: ${pkgs_to_install}"
479 [ -n "$pkgs_to_install" ] && smart --data-dir=${target_rootfs}/var/lib/smart install -y ${pkgs_to_install}
480
481 if [ -n "${package_attemptonly}" ]; then
482 echo "Note: installing attempt only packages..."
483 echo "Attempting $pkgs_to_install"
484 echo "Note: see `dirname ${BB_LOGFILE}`/log.do_${task}_attemptonly.${PID}"
485 translate_oe_to_smart ${sdk_mode} --attemptonly $package_attemptonly
486 echo "Attempting $pkgs_to_install" >> "`dirname ${BB_LOGFILE}`/log.do_${task}_attemptonly.${PID}"
487 smart --data-dir=${target_rootfs}/var/lib/smart install --attempt -y ${pkgs_to_install} >> "`dirname ${BB_LOGFILE}`/log.do_${task}_attemptonly.${PID}" 2>&1 || :
488 fi
489}
490
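The comm(1) calls above compute set differences between the sorted installed and solution manifests. The same computation as a Python sketch (the manifest contents are invented):

def incremental_plan(installed, solution):
    """Compute remove/install lists the way the comm(1) calls above do."""
    installed, solution = set(installed), set(solution)
    to_remove = sorted(installed - solution)   # comm -1 -3 solution installed
    to_install = sorted(solution - installed)  # comm -2 -3 solution installed
    return to_remove, to_install

rm, add = incremental_plan(["base-files", "busybox", "old-pkg"],
                           ["base-files", "busybox", "new-pkg"])
print(rm)   # ['old-pkg']
print(add)  # ['new-pkg']
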
491# Construct per file dependencies file
492def write_rpm_perfiledata(srcname, d):
493 workdir = d.getVar('WORKDIR', True)
494 packages = d.getVar('PACKAGES', True)
495 pkgd = d.getVar('PKGD', True)
496
497 def dump_filerdeps(varname, outfile, d):
498 outfile.write("#!/usr/bin/env python\n\n")
499 outfile.write("# Dependency table\n")
500 outfile.write('deps = {\n')
501 for pkg in packages.split():
502 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
503 dependsflist = (d.getVar(dependsflist_key, True) or "")
504 for dfile in dependsflist.split():
505 key = "FILE" + varname + "_" + dfile + "_" + pkg
506 depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
507 file = dfile.replace("@underscore@", "_")
508 file = file.replace("@closebrace@", "]")
509 file = file.replace("@openbrace@", "[")
510 file = file.replace("@tab@", "\t")
511 file = file.replace("@space@", " ")
512 file = file.replace("@at@", "@")
513 outfile.write('"' + pkgd + file + '" : "')
514 for dep in depends_dict:
515 ver = depends_dict[dep]
516 if dep and ver:
517 ver = ver.replace("(","")
518 ver = ver.replace(")","")
519 outfile.write(dep + " " + ver + " ")
520 else:
521 outfile.write(dep + " ")
522 outfile.write('",\n')
523 outfile.write('}\n\n')
524 outfile.write("import sys\n")
525 outfile.write("while 1:\n")
526 outfile.write("\tline = sys.stdin.readline().strip()\n")
527 outfile.write("\tif not line:\n")
528 outfile.write("\t\tsys.exit(0)\n")
529 outfile.write("\tif line in deps:\n")
530 outfile.write("\t\tprint(deps[line] + '\\n')\n")
531
532 # OE-core dependencies a.k.a. RPM requires
533 outdepends = workdir + "/" + srcname + ".requires"
534
535 try:
536 dependsfile = open(outdepends, 'w')
537 except OSError:
538        raise bb.build.FuncFailed("unable to open rpm requires file for writing.")
539
540 dump_filerdeps('RDEPENDS', dependsfile, d)
541
542 dependsfile.close()
543 os.chmod(outdepends, 0755)
544
545 # OE-core / RPM Provides
546 outprovides = workdir + "/" + srcname + ".provides"
547
548 try:
549 providesfile = open(outprovides, 'w')
550 except OSError:
551        raise bb.build.FuncFailed("unable to open rpm provides file for writing.")
552
553 dump_filerdeps('RPROVIDES', providesfile, d)
554
555 providesfile.close()
556 os.chmod(outprovides, 0755)
557
558 return (outdepends, outprovides)
559
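For reference, the helper that write_rpm_perfiledata emits is a small self-contained script: rpmbuild runs it as __find_requires/__find_provides and feeds packaged file names on stdin, one per line. A trimmed sketch of what such a generated script looks like (the path and dependency strings here are invented):

#!/usr/bin/env python
# Dependency table, as dumped by dump_filerdeps() above
deps = {
    "/path/to/pkgd/usr/bin/foo": "libc.so.6 update-alternatives ",
}

import sys
while 1:
    line = sys.stdin.readline().strip()
    if not line:
        sys.exit(0)
    if line in deps:
        print(deps[line] + '\n')
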
560
561python write_specfile () {
562 import oe.packagedata
563
564 # append information for logs and patches to %prep
565 def add_prep(d,spec_files_bottom):
566 if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) == 'srpm':
567 spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
568 spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
569 spec_files_bottom.append('')
570
571    # Append the name of each source tarball as a 'Source' entry in the .spec file.
572 def tail_source(d):
573 if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) == 'srpm':
574 source_list = get_package(d)
575 source_number = 0
576 workdir = d.getVar('WORKDIR', True)
577 for source in source_list:
578            # rpmbuild doesn't need root permissions, but it needs
579            # to know each file's user and group name; the only user and
580            # group available when working in fakeroot is "root".
581 os.chown("%s/%s" % (workdir, source), 0, 0)
582 spec_preamble_top.append('Source' + str(source_number) + ': %s' % source)
583 source_number += 1
584 # We need a simple way to remove the MLPREFIX from the package name,
585 # and dependency information...
586 def strip_multilib(name, d):
587 multilibs = d.getVar('MULTILIBS', True) or ""
588 for ext in multilibs.split():
589 eext = ext.split(':')
590 if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
591 name = "".join(name.split(eext[1] + '-'))
592 return name
593
594 def strip_multilib_deps(deps, d):
595 depends = bb.utils.explode_dep_versions2(deps or "")
596 newdeps = {}
597 for dep in depends:
598 newdeps[strip_multilib(dep, d)] = depends[dep]
599 return bb.utils.join_deps(newdeps)
600
601# ml = d.getVar("MLPREFIX", True)
602# if ml and name and len(ml) != 0 and name.find(ml) == 0:
603# return ml.join(name.split(ml, 1)[1:])
604# return name
605
606 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
607 # This format is similar to OE, however there are restrictions on the
608 # characters that can be in a field. In the Version field, "-"
609 # characters are not allowed. "-" is allowed in the Release field.
610 #
611 # We translate the "-" in the version to a "+", by loading the PKGV
612 # from the dependent recipe, replacing the - with a +, and then using
613 # that value to do a replace inside of this recipe's dependencies.
614 # This preserves the "-" separator between the version and release, as
615 # well as any "-" characters inside of the release field.
616 #
617 # All of this has to happen BEFORE the mapping_rename_hook as
618 # after renaming we cannot look up the dependencies in the packagedata
619 # store.
620 def translate_vers(varname, d):
621 depends = d.getVar(varname, True)
622 if depends:
623 depends_dict = bb.utils.explode_dep_versions2(depends)
624 newdeps_dict = {}
625 for dep in depends_dict:
626 verlist = []
627 for ver in depends_dict[dep]:
628 if '-' in ver:
629 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
630 if 'PKGV' in subd:
631 pv = subd['PV']
632 pkgv = subd['PKGV']
633 reppv = pkgv.replace('-', '+')
634 ver = ver.replace(pv, reppv).replace(pkgv, reppv)
635 if 'PKGR' in subd:
636                                # Make sure ver uses PKGR rather than PR
637 pr = '-' + subd['PR']
638 pkgr = '-' + subd['PKGR']
639 if pkgr not in ver:
640 ver = ver.replace(pr, pkgr)
641 verlist.append(ver)
642 else:
643 verlist.append(ver)
644 newdeps_dict[dep] = verlist
645 depends = bb.utils.join_deps(newdeps_dict)
646 d.setVar(varname, depends.strip())
647
648    # We need to change the dependency style from BitBake to RPM.
649    # This needs to happen AFTER the mapping_rename_hook.
650 def print_deps(variable, tag, array, d):
651 depends = variable
652 if depends:
653 depends_dict = bb.utils.explode_dep_versions2(depends)
654 for dep in depends_dict:
655 for ver in depends_dict[dep]:
656 ver = ver.replace('(', '')
657 ver = ver.replace(')', '')
658 array.append("%s: %s %s" % (tag, dep, ver))
659 if not len(depends_dict[dep]):
660 array.append("%s: %s" % (tag, dep))
661
662 def walk_files(walkpath, target, conffiles):
663 for rootpath, dirs, files in os.walk(walkpath):
664 path = rootpath.replace(walkpath, "")
665 for dir in dirs:
666 # All packages own the directories their files are in...
667 target.append('%dir "' + path + '/' + dir + '"')
668 for file in files:
669 if conffiles.count(path + '/' + file):
670 target.append('%config "' + path + '/' + file + '"')
671 else:
672 target.append('"' + path + '/' + file + '"')
673
674 # Prevent the prerm/postrm scripts from being run during an upgrade
675 def wrap_uninstall(scriptvar):
676 scr = scriptvar.strip()
677 if scr.startswith("#!"):
678 pos = scr.find("\n") + 1
679 else:
680 pos = 0
681 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
682 return scr
683
684 def get_perfile(varname, pkg, d):
685 deps = []
686 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
687 dependsflist = (d.getVar(dependsflist_key, True) or "")
688 for dfile in dependsflist.split():
689 key = "FILE" + varname + "_" + dfile + "_" + pkg
690 depends = d.getVar(key, True)
691 if depends:
692 deps.append(depends)
693 return " ".join(deps)
694
695 def append_description(spec_preamble, text):
696 """
697 Add the description to the spec file.
698 """
699 import textwrap
700 dedent_text = textwrap.dedent(text).strip()
701 # Bitbake saves "\n" as "\\n"
702 if '\\n' in dedent_text:
703 for t in dedent_text.split('\\n'):
704 spec_preamble.append(t.strip())
705 else:
706 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
707
708 packages = d.getVar('PACKAGES', True)
709 if not packages or packages == '':
710 bb.debug(1, "No packages; nothing to do")
711 return
712
713 pkgdest = d.getVar('PKGDEST', True)
714 if not pkgdest:
715 bb.fatal("No PKGDEST")
716
717 outspecfile = d.getVar('OUTSPECFILE', True)
718 if not outspecfile:
719 bb.fatal("No OUTSPECFILE")
720
721 # Construct the SPEC file...
722 srcname = strip_multilib(d.getVar('PN', True), d)
723 srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
724 srcversion = d.getVar('PKGV', True).replace('-', '+')
725 srcrelease = d.getVar('PKGR', True)
726 srcepoch = (d.getVar('PKGE', True) or "")
727 srclicense = d.getVar('LICENSE', True)
728 srcsection = d.getVar('SECTION', True)
729 srcmaintainer = d.getVar('MAINTAINER', True)
730 srchomepage = d.getVar('HOMEPAGE', True)
731 srcdescription = d.getVar('DESCRIPTION', True) or "."
732
733 srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d)
734 srcrdepends = []
735 srcrrecommends = []
736 srcrsuggests = []
737 srcrprovides = []
738 srcrreplaces = []
739 srcrconflicts = []
740 srcrobsoletes = []
741
742 srcrpreinst = []
743 srcrpostinst = []
744 srcrprerm = []
745 srcrpostrm = []
746
747 spec_preamble_top = []
748 spec_preamble_bottom = []
749
750 spec_scriptlets_top = []
751 spec_scriptlets_bottom = []
752
753 spec_files_top = []
754 spec_files_bottom = []
755
756 perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
757
758 for pkg in packages.split():
759 localdata = bb.data.createCopy(d)
760
761 root = "%s/%s" % (pkgdest, pkg)
762
763 lf = bb.utils.lockfile(root + ".lock")
764
765 localdata.setVar('ROOT', '')
766 localdata.setVar('ROOT_%s' % pkg, root)
767 pkgname = localdata.getVar('PKG_%s' % pkg, True)
768 if not pkgname:
769 pkgname = pkg
770 localdata.setVar('PKG', pkgname)
771
772 localdata.setVar('OVERRIDES', pkg)
773
774 bb.data.update_data(localdata)
775
776 conffiles = (localdata.getVar('CONFFILES', True) or "").split()
777
778 splitname = strip_multilib(pkgname, d)
779
780 splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
781 splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
782 splitrelease = (localdata.getVar('PKGR', True) or "")
783 splitepoch = (localdata.getVar('PKGE', True) or "")
784 splitlicense = (localdata.getVar('LICENSE', True) or "")
785 splitsection = (localdata.getVar('SECTION', True) or "")
786 splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
787
788 translate_vers('RDEPENDS', localdata)
789 translate_vers('RRECOMMENDS', localdata)
790 translate_vers('RSUGGESTS', localdata)
791 translate_vers('RPROVIDES', localdata)
792 translate_vers('RREPLACES', localdata)
793 translate_vers('RCONFLICTS', localdata)
794
795 # Map the dependencies into their final form
796 mapping_rename_hook(localdata)
797
798 splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d)
799 splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d)
800 splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d)
801 splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d)
802 splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d)
803 splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d)
804 splitrobsoletes = []
805
806 splitrpreinst = localdata.getVar('pkg_preinst', True)
807 splitrpostinst = localdata.getVar('pkg_postinst', True)
808 splitrprerm = localdata.getVar('pkg_prerm', True)
809 splitrpostrm = localdata.getVar('pkg_postrm', True)
810
811
812 if not perfiledeps:
813 # Add in summary of per file dependencies
814 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
815 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
816
817 # Gather special src/first package data
818 if srcname == splitname:
819 srcrdepends = splitrdepends
820 srcrrecommends = splitrrecommends
821 srcrsuggests = splitrsuggests
822 srcrprovides = splitrprovides
823 srcrreplaces = splitrreplaces
824 srcrconflicts = splitrconflicts
825
826 srcrpreinst = splitrpreinst
827 srcrpostinst = splitrpostinst
828 srcrprerm = splitrprerm
829 srcrpostrm = splitrpostrm
830
831 file_list = []
832 walk_files(root, file_list, conffiles)
833 if not file_list and localdata.getVar('ALLOW_EMPTY') != "1":
834 bb.note("Not creating empty RPM package for %s" % splitname)
835 else:
836 bb.note("Creating RPM package for %s" % splitname)
837 spec_files_top.append('%files')
838 spec_files_top.append('%defattr(-,-,-,-)')
839 if file_list:
840 bb.note("Creating RPM package for %s" % splitname)
841 spec_files_top.extend(file_list)
842 else:
843 bb.note("Creating EMPTY RPM Package for %s" % splitname)
844 spec_files_top.append('')
845
846 bb.utils.unlockfile(lf)
847 continue
848
849 # Process subpackage data
850 spec_preamble_bottom.append('%%package -n %s' % splitname)
851 spec_preamble_bottom.append('Summary: %s' % splitsummary)
852 if srcversion != splitversion:
853 spec_preamble_bottom.append('Version: %s' % splitversion)
854 if srcrelease != splitrelease:
855 spec_preamble_bottom.append('Release: %s' % splitrelease)
856 if srcepoch != splitepoch:
857 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
858 if srclicense != splitlicense:
859 spec_preamble_bottom.append('License: %s' % splitlicense)
860 spec_preamble_bottom.append('Group: %s' % splitsection)
861
862 # Replaces == Obsoletes && Provides
863 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
864 rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
865 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
866 for dep in rreplaces:
867 if not dep in robsoletes:
868 robsoletes[dep] = rreplaces[dep]
869 if not dep in rprovides:
870 rprovides[dep] = rreplaces[dep]
871 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
872 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
873
874 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
875 if splitrpreinst:
876 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
877 if splitrpostinst:
878 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
879 if splitrprerm:
880 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
881 if splitrpostrm:
882 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
883
884 # Suggests in RPM are like recommends in OE-core!
885 print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
886        # There is no RPM analog for suggests, so map them to Recommends for now.
887 print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
888 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
889 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
890
891        # A conflict cannot also be a provide! We need to filter those out.
892 if splitrconflicts:
893 depends_dict = bb.utils.explode_dep_versions2(splitrconflicts)
894 newdeps_dict = {}
895 for dep in depends_dict:
896 if dep not in splitrprovides:
897 newdeps_dict[dep] = depends_dict[dep]
898 if newdeps_dict:
899 splitrconflicts = bb.utils.join_deps(newdeps_dict)
900 else:
901 splitrconflicts = ""
902
903 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
904
905 spec_preamble_bottom.append('')
906
907 spec_preamble_bottom.append('%%description -n %s' % splitname)
908 append_description(spec_preamble_bottom, splitdescription)
909
910 spec_preamble_bottom.append('')
911
912 # Now process scriptlets
913 if splitrpreinst:
914 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
915 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
916 spec_scriptlets_bottom.append(splitrpreinst)
917 spec_scriptlets_bottom.append('')
918 if splitrpostinst:
919 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
920 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
921 spec_scriptlets_bottom.append(splitrpostinst)
922 spec_scriptlets_bottom.append('')
923 if splitrprerm:
924 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
925 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
926 scriptvar = wrap_uninstall(splitrprerm)
927 spec_scriptlets_bottom.append(scriptvar)
928 spec_scriptlets_bottom.append('')
929 if splitrpostrm:
930 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
931 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
932 scriptvar = wrap_uninstall(splitrpostrm)
933 spec_scriptlets_bottom.append(scriptvar)
934 spec_scriptlets_bottom.append('')
935
936 # Now process files
937 file_list = []
938 walk_files(root, file_list, conffiles)
939 if not file_list and localdata.getVar('ALLOW_EMPTY') != "1":
940 bb.note("Not creating empty RPM package for %s" % splitname)
941 else:
942 spec_files_bottom.append('%%files -n %s' % splitname)
943 spec_files_bottom.append('%defattr(-,-,-,-)')
944 if file_list:
945 bb.note("Creating RPM package for %s" % splitname)
946 spec_files_bottom.extend(file_list)
947 else:
948 bb.note("Creating EMPTY RPM Package for %s" % splitname)
949 spec_files_bottom.append('')
950
951 del localdata
952 bb.utils.unlockfile(lf)
953
954 add_prep(d,spec_files_bottom)
955 spec_preamble_top.append('Summary: %s' % srcsummary)
956 spec_preamble_top.append('Name: %s' % srcname)
957 spec_preamble_top.append('Version: %s' % srcversion)
958 spec_preamble_top.append('Release: %s' % srcrelease)
959 if srcepoch and srcepoch.strip() != "":
960 spec_preamble_top.append('Epoch: %s' % srcepoch)
961 spec_preamble_top.append('License: %s' % srclicense)
962 spec_preamble_top.append('Group: %s' % srcsection)
963 spec_preamble_top.append('Packager: %s' % srcmaintainer)
964 spec_preamble_top.append('URL: %s' % srchomepage)
965 tail_source(d)
966
967 # Replaces == Obsoletes && Provides
968 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
969 rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
970 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
971 for dep in rreplaces:
972 if not dep in robsoletes:
973 robsoletes[dep] = rreplaces[dep]
974 if not dep in rprovides:
975 rprovides[dep] = rreplaces[dep]
976 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
977 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
978
979 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
980 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
981 if srcrpreinst:
982 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
983 if srcrpostinst:
984 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
985 if srcrprerm:
986 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
987 if srcrpostrm:
988 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
989
990 # Suggests in RPM are like recommends in OE-core!
991 print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
992    # There is no RPM analog for suggests, so map them to Recommends for now.
993 print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
994 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
995 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
996
997    # A conflict cannot also be a provide! We need to filter those out.
998 if srcrconflicts:
999 depends_dict = bb.utils.explode_dep_versions2(srcrconflicts)
1000 newdeps_dict = {}
1001 for dep in depends_dict:
1002 if dep not in srcrprovides:
1003 newdeps_dict[dep] = depends_dict[dep]
1004 if newdeps_dict:
1005 srcrconflicts = bb.utils.join_deps(newdeps_dict)
1006 else:
1007 srcrconflicts = ""
1008
1009 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
1010
1011 spec_preamble_top.append('')
1012
1013 spec_preamble_top.append('%description')
1014 append_description(spec_preamble_top, srcdescription)
1015
1016 spec_preamble_top.append('')
1017
1018 if srcrpreinst:
1019 spec_scriptlets_top.append('%pre')
1020 spec_scriptlets_top.append('# %s - preinst' % srcname)
1021 spec_scriptlets_top.append(srcrpreinst)
1022 spec_scriptlets_top.append('')
1023 if srcrpostinst:
1024 spec_scriptlets_top.append('%post')
1025 spec_scriptlets_top.append('# %s - postinst' % srcname)
1026 spec_scriptlets_top.append(srcrpostinst)
1027 spec_scriptlets_top.append('')
1028 if srcrprerm:
1029 spec_scriptlets_top.append('%preun')
1030 spec_scriptlets_top.append('# %s - prerm' % srcname)
1031 scriptvar = wrap_uninstall(srcrprerm)
1032 spec_scriptlets_top.append(scriptvar)
1033 spec_scriptlets_top.append('')
1034 if srcrpostrm:
1035 spec_scriptlets_top.append('%postun')
1036 spec_scriptlets_top.append('# %s - postrm' % srcname)
1037 scriptvar = wrap_uninstall(srcrpostrm)
1038 spec_scriptlets_top.append(scriptvar)
1039 spec_scriptlets_top.append('')
1040
1041 # Write the SPEC file
1042 try:
1043 specfile = open(outspecfile, 'w')
1044 except OSError:
1045 raise bb.build.FuncFailed("unable to open spec file for writing.")
1046
1047 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
1048 # of the generated spec file
1049 external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
1050 if external_preamble:
1051 specfile.write(external_preamble + "\n")
1052
1053 for line in spec_preamble_top:
1054 specfile.write(line + "\n")
1055
1056 for line in spec_preamble_bottom:
1057 specfile.write(line + "\n")
1058
1059 for line in spec_scriptlets_top:
1060 specfile.write(line + "\n")
1061
1062 for line in spec_scriptlets_bottom:
1063 specfile.write(line + "\n")
1064
1065 for line in spec_files_top:
1066 specfile.write(line + "\n")
1067
1068 for line in spec_files_bottom:
1069 specfile.write(line + "\n")
1070
1071 specfile.close()
1072}
1073
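Of the helpers in write_specfile, wrap_uninstall is the easiest to see on a concrete input: it guards a prerm/postrm body so it only runs on package erase ($1 = 0), not on upgrade. A standalone sketch with a worked example (the script body is invented):

def wrap_uninstall(script):
    """Guard a prerm/postrm body so it only runs on erase ($1 = 0)."""
    scr = script.strip()
    # Keep a leading shebang line outside the guard
    pos = scr.find("\n") + 1 if scr.startswith("#!") else 0
    return scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'

print(wrap_uninstall("#!/bin/sh\nrm -f /etc/foo.conf"))
# #!/bin/sh
# if [ "$1" = "0" ] ; then
# rm -f /etc/foo.conf
# fi
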
1074python do_package_rpm () {
1075 def creat_srpm_dir(d):
1076 if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) == 'srpm':
1077 clean_licenses = get_licenses(d)
1078 pkgwritesrpmdir = bb.data.expand('${PKGWRITEDIRSRPM}/${PACKAGE_ARCH_EXTEND}', d)
1079 pkgwritesrpmdir = pkgwritesrpmdir + '/' + clean_licenses
1080 bb.utils.mkdirhier(pkgwritesrpmdir)
1081 os.chmod(pkgwritesrpmdir, 0755)
1082 return pkgwritesrpmdir
1083
1084 # We need a simple way to remove the MLPREFIX from the package name,
1085 # and dependency information...
1086 def strip_multilib(name, d):
1087 ml = d.getVar("MLPREFIX", True)
1088 if ml and name and len(ml) != 0 and name.find(ml) >= 0:
1089 return "".join(name.split(ml))
1090 return name
1091
1092 workdir = d.getVar('WORKDIR', True)
1093 tmpdir = d.getVar('TMPDIR', True)
1094 pkgd = d.getVar('PKGD', True)
1095 pkgdest = d.getVar('PKGDEST', True)
1096 if not workdir or not pkgd or not tmpdir:
1097 bb.error("Variables incorrectly set, unable to package")
1098 return
1099
1100 packages = d.getVar('PACKAGES', True)
1101 if not packages or packages == '':
1102 bb.debug(1, "No packages; nothing to do")
1103 return
1104
1105 # Construct the spec file...
1106    # If the spec file already exists and has not been stored in
1107    # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
1108    # so remove it before running rpmbuild.
1109 srcname = strip_multilib(d.getVar('PN', True), d)
1110 outspecfile = workdir + "/" + srcname + ".spec"
1111 if os.path.isfile(outspecfile):
1112 os.remove(outspecfile)
1113 d.setVar('OUTSPECFILE', outspecfile)
1114 bb.build.exec_func('write_specfile', d)
1115
1116 perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
1117 if perfiledeps:
1118 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
1119
1120 # Setup the rpmbuild arguments...
1121 rpmbuild = d.getVar('RPMBUILD', True)
1122 targetsys = d.getVar('TARGET_SYS', True)
1123 targetvendor = d.getVar('TARGET_VENDOR', True)
1124 package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
1125 if package_arch not in "all any noarch".split() and not package_arch.endswith("_nativesdk"):
1126 ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
1127 d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
1128 else:
1129 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
1130 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
1131 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${TARGET_VENDOR}-${TARGET_OS}')
1132 magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
1133 bb.utils.mkdirhier(pkgwritedir)
1134 os.chmod(pkgwritedir, 0755)
1135
1136 cmd = rpmbuild
1137 cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
1138 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
1139 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
1140 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
1141 if perfiledeps:
1142 cmd = cmd + " --define '__find_requires " + outdepends + "'"
1143 cmd = cmd + " --define '__find_provides " + outprovides + "'"
1144 else:
1145 cmd = cmd + " --define '__find_requires %{nil}'"
1146 cmd = cmd + " --define '__find_provides %{nil}'"
1147 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
1148 cmd = cmd + " --define 'debug_package %{nil}'"
1149 cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
1150 cmd = cmd + " --define '_tmppath " + workdir + "'"
1151 if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) == 'srpm':
1152 cmd = cmd + " --define '_sourcedir " + workdir + "'"
1153 cmdsrpm = cmd + " --define '_srcrpmdir " + creat_srpm_dir(d) + "'"
1154 cmdsrpm = cmdsrpm + " -bs " + outspecfile
1155 # Build the .src.rpm
1156 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
1157 d.setVarFlag('SBUILDSPEC', 'func', '1')
1158 bb.build.exec_func('SBUILDSPEC', d)
1159 # Remove the source (SOURCE0, SOURCE1 ...)
1160 cmd = cmd + " --rmsource "
1161 cmd = cmd + " -bb " + outspecfile
1162
1163 # Build the rpm package!
1164 d.setVar('BUILDSPEC', cmd + "\n")
1165 d.setVarFlag('BUILDSPEC', 'func', '1')
1166 bb.build.exec_func('BUILDSPEC', d)
1167}
1168
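Note that this strip_multilib differs from the MULTILIBS-based one inside write_specfile: it simply removes every occurrence of MLPREFIX from the name. A standalone sketch with an example (the names are illustrative):

def strip_multilib(name, mlprefix):
    """Drop all occurrences of the multilib prefix from a package name."""
    if mlprefix and name and mlprefix in name:
        return "".join(name.split(mlprefix))
    return name

print(strip_multilib("lib32-glibc-dev", "lib32-"))  # glibc-dev
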
1169python () {
1170 if d.getVar('PACKAGES', True) != '':
1171 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
1172 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
1173 d.setVarFlag('do_package_write_rpm', 'fakeroot', 1)
1174}
1175
1176SSTATETASKS += "do_package_write_rpm"
1177do_package_write_rpm[sstate-name] = "deploy-rpm"
1178do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
1179do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
1180# Take a shared lock, we can write multiple packages at the same time...
1181# but we need to stop the rootfs/solver from running while we do...
1182do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
1183
1184python do_package_write_rpm_setscene () {
1185 sstate_setscene(d)
1186}
1187addtask do_package_write_rpm_setscene
1188
1189python do_package_write_rpm () {
1190 bb.build.exec_func("read_subpackage_metadata", d)
1191 bb.build.exec_func("do_package_rpm", d)
1192}
1193
1194do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
1195do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
1196do_package_write_rpm[umask] = "022"
1197addtask package_write_rpm before do_package_write after do_packagedata do_package
1198
1199PACKAGEINDEXES += "[ ! -e ${DEPLOY_DIR_RPM} ] || package_update_index_rpm;"
1200PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
1201PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
new file mode 100644
index 0000000000..2d6fc8fe21
--- /dev/null
+++ b/meta/classes/package_tar.bbclass
@@ -0,0 +1,68 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "tar"
4
5python do_package_tar () {
6 import subprocess
7 workdir = d.getVar('WORKDIR', True)
8 if not workdir:
9 bb.error("WORKDIR not defined, unable to package")
10 return
11
12 outdir = d.getVar('DEPLOY_DIR_TAR', True)
13 if not outdir:
14 bb.error("DEPLOY_DIR_TAR not defined, unable to package")
15 return
16
17 dvar = d.getVar('D', True)
18 if not dvar:
19 bb.error("D not defined, unable to package")
20 return
21
22 packages = d.getVar('PACKAGES', True)
23 if not packages:
24 bb.debug(1, "PACKAGES not defined, nothing to package")
25 return
26
27 pkgdest = d.getVar('PKGDEST', True)
28
29 bb.utils.mkdirhier(outdir)
30 bb.utils.mkdirhier(dvar)
31
32 for pkg in packages.split():
33 localdata = bb.data.createCopy(d)
34 root = "%s/%s" % (pkgdest, pkg)
35
36        overrides = localdata.getVar('OVERRIDES', False)
37 localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
38 bb.data.update_data(localdata)
39
40 bb.utils.mkdirhier(root)
41 basedir = os.path.dirname(root)
42 tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
43 os.chdir(root)
44 from glob import glob
45 if not glob('*'):
46 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
47 continue
48 ret = subprocess.call("tar -czf %s %s" % (tarfn, '.'), shell=True)
49 if ret != 0:
50 bb.error("Creation of tar %s failed." % tarfn)
51}
52
53python () {
54 if d.getVar('PACKAGES', True) != '':
55 deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
56 deps.append('tar-native:do_populate_sysroot')
57 deps.append('virtual/fakeroot-native:do_populate_sysroot')
58 d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
59 d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
60}
61
62
63python do_package_write_tar () {
64 bb.build.exec_func("read_subpackage_metadata", d)
65 bb.build.exec_func("do_package_tar", d)
66}
67do_package_write_tar[dirs] = "${D}"
68addtask package_write_tar before do_build after do_packagedata do_package
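In isolation, the per-package loop in do_package_tar above reduces to this standalone sketch (hypothetical paths and package names):

    import os
    import subprocess

    pkgdest = "/build/tmp/work/foo/1.0-r0/packages-split"   # hypothetical PKGDEST
    deploydir = "/build/tmp/deploy/tar"                     # hypothetical DEPLOY_DIR_TAR

    for pkg in ("foo", "foo-doc"):                          # hypothetical PACKAGES
        root = os.path.join(pkgdest, pkg)
        if not os.path.isdir(root) or not os.listdir(root):
            continue            # skip empty packages, as the class does via glob('*')
        tarfn = os.path.join(deploydir, "%s-1.0-r0.tar.gz" % pkg)
        subprocess.call("tar -czf %s ." % tarfn, shell=True, cwd=root)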
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
new file mode 100644
index 0000000000..d1aedf2289
--- /dev/null
+++ b/meta/classes/packagedata.bbclass
@@ -0,0 +1,26 @@
1python read_subpackage_metadata () {
2 import oe.packagedata
3
4 vars = {
5 "PN" : d.getVar('PN', True),
6 "PE" : d.getVar('PE', True),
7 "PV" : d.getVar('PV', True),
8 "PR" : d.getVar('PR', True),
9 }
10
11 data = oe.packagedata.read_pkgdata(vars["PN"], d)
12
13 for key in data.keys():
14 d.setVar(key, data[key])
15
16 for pkg in d.getVar('PACKAGES', True).split():
17 sdata = oe.packagedata.read_subpkgdata(pkg, d)
18 for key in sdata.keys():
19 if key in vars:
20 if sdata[key] != vars[key]:
21 if key == "PN":
22 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
23 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
24 continue
25 d.setVar(key, sdata[key])
26}
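The pkgdata files consumed here are flat "VARIABLE: value" text files; a minimal reader equivalent in spirit to what oe.packagedata provides might look like this (illustrative sketch only, the real parser lives in meta/lib/oe/packagedata.py):

    def read_pkgdatafile(path):
        # Parse "VARIABLE: value" lines into a dict
        values = {}
        with open(path) as f:
            for line in f:
                if ':' in line:
                    key, val = line.split(':', 1)
                    values[key.strip()] = val.strip()
        return values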
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
new file mode 100644
index 0000000000..9bc9cc22ad
--- /dev/null
+++ b/meta/classes/packagegroup.bbclass
@@ -0,0 +1,47 @@
1# Class for packagegroup (package group) recipes
2
3# By default, only the packagegroup package itself is in PACKAGES.
4# -dbg and -dev flavours are handled by the anonfunc below.
5# This means that packagegroup recipes used to build multiple packagegroup
6# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
7PACKAGES = "${PN}"
8
9# By default, packagegroup packages are architecture-independent.
10# Only if their dependencies are modified by MACHINE_FEATURES do the
11# packages need PACKAGE_ARCH set to MACHINE_ARCH after inheriting packagegroup.bbclass
12inherit allarch
13
14# This automatically adds -dbg and -dev flavours of all PACKAGES
15# to the list. Their dependencies (RRECOMMENDS) are handled as usual
16# by package_depchains in a following step.
17# Also mark all packages as ALLOW_EMPTY
18python () {
19 packages = d.getVar('PACKAGES', True).split()
20 genpackages = []
21 for pkg in packages:
22 d.setVar("ALLOW_EMPTY_%s" % pkg, "1")
23 for postfix in ['-dbg', '-dev', '-ptest']:
24 genpackages.append(pkg+postfix)
25 if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
26 d.setVar('PACKAGES', ' '.join(packages+genpackages))
27}
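For example, a hypothetical recipe inheriting this class with PACKAGES = "packagegroup-foo" ends up with:

    PACKAGES = "packagegroup-foo packagegroup-foo-dbg packagegroup-foo-dev packagegroup-foo-ptest"

with ALLOW_EMPTY set on the original packagegroup-foo package, as the loop above does.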
28
29# We don't want to look at shared library dependencies for the
30# dbg packages
31DEPCHAIN_DBGDEFAULTDEPS = "1"
32
33# We only need the packaging tasks - disable the rest
34do_fetch[noexec] = "1"
35do_unpack[noexec] = "1"
36do_patch[noexec] = "1"
37do_configure[noexec] = "1"
38do_compile[noexec] = "1"
39do_install[noexec] = "1"
40do_populate_sysroot[noexec] = "1"
41
42python () {
43 initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True)
44 if initman and initman in ['sysvinit', 'systemd'] and not base_contains('DISTRO_FEATURES', initman, True, False, d):
45 bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
46}
47
diff --git a/meta/classes/packageinfo.bbclass b/meta/classes/packageinfo.bbclass
new file mode 100644
index 0000000000..7d60ace1dc
--- /dev/null
+++ b/meta/classes/packageinfo.bbclass
@@ -0,0 +1,22 @@
1python packageinfo_handler () {
2 import oe.packagedata
3 pkginfolist = []
4
5 pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + '/runtime/'
6 if os.path.exists(pkgdata_dir):
7 for root, dirs, files in os.walk(pkgdata_dir):
8 for pkgname in files:
9 if pkgname.endswith('.packaged'):
10 pkgname = pkgname[:-9]
11                    pkgdatafile = os.path.join(root, pkgname)
12 try:
13 sdata = oe.packagedata.read_pkgdatafile(pkgdatafile)
14 sdata['PKG'] = pkgname
15 pkginfolist.append(sdata)
16                        except Exception as exc:
17                            bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, exc.__class__, str(exc)))
18 bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data)
19}
20
21addhandler packageinfo_handler
22packageinfo_handler[eventmask] = "bb.event.RequestPackageInfo"
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
new file mode 100644
index 0000000000..86c65b3b8d
--- /dev/null
+++ b/meta/classes/patch.bbclass
@@ -0,0 +1,187 @@
1# Copyright (C) 2006 OpenedHand LTD
2
3# Point to an empty file so any user's custom settings don't break things
4QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
5
6PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
7
8inherit terminal
9
10def src_patches(d, all = False ):
11 workdir = d.getVar('WORKDIR', True)
12 fetch = bb.fetch2.Fetch([], d)
13 patches = []
14 sources = []
15 for url in fetch.urls:
16 local = patch_path(url, fetch, workdir)
17 if not local:
18 if all:
19 local = fetch.localpath(url)
20 sources.append(local)
21 continue
22
23 urldata = fetch.ud[url]
24 parm = urldata.parm
25 patchname = parm.get('pname') or os.path.basename(local)
26
27 apply, reason = should_apply(parm, d)
28 if not apply:
29 if reason:
30 bb.note("Patch %s %s" % (patchname, reason))
31 continue
32
33 patchparm = {'patchname': patchname}
34 if "striplevel" in parm:
35 striplevel = parm["striplevel"]
36 elif "pnum" in parm:
37 #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
38 striplevel = parm["pnum"]
39 else:
40 striplevel = '1'
41 patchparm['striplevel'] = striplevel
42
43 patchdir = parm.get('patchdir')
44 if patchdir:
45 patchparm['patchdir'] = patchdir
46
47 localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
48 patches.append(localurl)
49
50 if all:
51 return sources
52
53 return patches
54
55def patch_path(url, fetch, workdir):
56 """Return the local path of a patch, or None if this isn't a patch"""
57
58 local = fetch.localpath(url)
59 base, ext = os.path.splitext(os.path.basename(local))
60 if ext in ('.gz', '.bz2', '.Z'):
61 local = os.path.join(workdir, base)
62 ext = os.path.splitext(base)[1]
63
64 urldata = fetch.ud[url]
65 if "apply" in urldata.parm:
66 apply = oe.types.boolean(urldata.parm["apply"])
67 if not apply:
68 return
69 elif ext not in (".diff", ".patch"):
70 return
71
72 return local
73
74def should_apply(parm, d):
75 """Determine if we should apply the given patch"""
76
77 if "mindate" in parm or "maxdate" in parm:
78 pn = d.getVar('PN', True)
79 srcdate = d.getVar('SRCDATE_%s' % pn, True)
80 if not srcdate:
81 srcdate = d.getVar('SRCDATE', True)
82
83 if srcdate == "now":
84 srcdate = d.getVar('DATE', True)
85
86 if "maxdate" in parm and parm["maxdate"] < srcdate:
87 return False, 'is outdated'
88
89 if "mindate" in parm and parm["mindate"] > srcdate:
90 return False, 'is predated'
91
92
93 if "minrev" in parm:
94 srcrev = d.getVar('SRCREV', True)
95 if srcrev and srcrev < parm["minrev"]:
96 return False, 'applies to later revisions'
97
98 if "maxrev" in parm:
99 srcrev = d.getVar('SRCREV', True)
100 if srcrev and srcrev > parm["maxrev"]:
101 return False, 'applies to earlier revisions'
102
103 if "rev" in parm:
104 srcrev = d.getVar('SRCREV', True)
105 if srcrev and parm["rev"] not in srcrev:
106 return False, "doesn't apply to revision"
107
108 if "notrev" in parm:
109 srcrev = d.getVar('SRCREV', True)
110 if srcrev and parm["notrev"] in srcrev:
111 return False, "doesn't apply to revision"
112
113 return True, None
114
115should_apply[vardepsexclude] = "DATE SRCDATE"
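Putting these helpers together, a hypothetical recipe can gate its patches straight from SRC_URI parameters (striplevel handled in src_patches, maxdate in should_apply, apply in patch_path):

    SRC_URI = "http://example.com/foo-1.0.tar.gz \
               file://fix-build.patch;striplevel=2 \
               file://old-api.patch;maxdate=20100101 \
               file://optional.diff;apply=no"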
116
117python patch_do_patch() {
118 import oe.patch
119
120 patchsetmap = {
121 "patch": oe.patch.PatchTree,
122 "quilt": oe.patch.QuiltTree,
123 "git": oe.patch.GitApplyTree,
124 }
125
126 cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
127
128 resolvermap = {
129 "noop": oe.patch.NOOPResolver,
130 "user": oe.patch.UserResolver,
131 }
132
133 rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
134
135 classes = {}
136
137 s = d.getVar('S', True)
138
139 path = os.getenv('PATH')
140 os.putenv('PATH', d.getVar('PATH', True))
141
142 # We must use one TMPDIR per process so that the "patch" processes
143 # don't generate the same temp file name.
144
145 import tempfile
146 process_tmpdir = tempfile.mkdtemp()
147 os.environ['TMPDIR'] = process_tmpdir
148
149 for patch in src_patches(d):
150 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
151
152 if "patchdir" in parm:
153 patchdir = parm["patchdir"]
154 if not os.path.isabs(patchdir):
155 patchdir = os.path.join(s, patchdir)
156 else:
157 patchdir = s
158
159 if not patchdir in classes:
160 patchset = cls(patchdir, d)
161 resolver = rcls(patchset, oe_terminal)
162 classes[patchdir] = (patchset, resolver)
163 patchset.Clean()
164 else:
165 patchset, resolver = classes[patchdir]
166
167 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
168 try:
169 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
170 except Exception as exc:
171 bb.utils.remove(process_tmpdir, True)
172 bb.fatal(str(exc))
173 try:
174 resolver.Resolve()
175 except bb.BBHandledException as e:
176 bb.utils.remove(process_tmpdir, True)
177 bb.fatal(str(e))
178
179 bb.utils.remove(process_tmpdir, True)
180}
181patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
182
183addtask patch after do_unpack
184do_patch[dirs] = "${WORKDIR}"
185do_patch[depends] = "${PATCHDEPENDENCY}"
186
187EXPORT_FUNCTIONS do_patch
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
new file mode 100644
index 0000000000..cc8de8b381
--- /dev/null
+++ b/meta/classes/perlnative.bbclass
@@ -0,0 +1,3 @@
1EXTRANATIVEPATH += "perl-native"
2DEPENDS += "perl-native"
3OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
new file mode 100644
index 0000000000..d46a8ba206
--- /dev/null
+++ b/meta/classes/pixbufcache.bbclass
@@ -0,0 +1,69 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for pixbuf
3# packages.
4#
5
6DEPENDS += "qemu-native"
7inherit qemu
8
9PIXBUF_PACKAGES ??= "${PN}"
10
11pixbufcache_common() {
12if [ "x$D" != "x" ]; then
13 $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
14 bindir=${bindir} base_libdir=${base_libdir}
15else
16
17 # Update the pixbuf loaders in case they haven't been registered yet
18 GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
19
20 if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
21	for icondir in ${datadir}/icons/*; do
22 if [ -d ${icondir} ]; then
23 gtk-update-icon-cache -t -q ${icondir}
24 fi
25 done
26 fi
27fi
28}
29
30python populate_packages_append() {
31 pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split()
32
33 for pkg in pixbuf_pkgs:
34 bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
35 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
36 if not postinst:
37 postinst = '#!/bin/sh\n'
38 postinst += d.getVar('pixbufcache_common', True)
39 d.setVar('pkg_postinst_%s' % pkg, postinst)
40
41 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
42 if not postrm:
43 postrm = '#!/bin/sh\n'
44 postrm += d.getVar('pixbufcache_common', True)
45 d.setVar('pkg_postrm_%s' % pkg, postrm)
46}
47
48#
49# Add an sstate postinst hook to update the cache for native packages
50#
51SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
52
53pixbufcache_sstate_postinst() {
54 if [ "${BB_CURRENTTASK}" = "populate_sysroot" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
55 then
56 gdk-pixbuf-query-loaders --update-cache
57 fi
58}
59
60# Add all of the dependencies of gdk-pixbuf as dependencies of
61# do_populate_sysroot_setscene so that pixbufcache_sstate_postinst can work
62# (otherwise gdk-pixbuf-query-loaders may not exist or link). Only add
63# gdk-pixbuf-native if we're not building gdk-pixbuf itself.
64#
65# Packages that use this class should extend this variable with their runtime
66# dependencies.
67PIXBUFCACHE_SYSROOT_DEPS = ""
68PIXBUFCACHE_SYSROOT_DEPS_class-native = "${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene libpng-native:do_populate_sysroot_setscene"
69do_populate_sysroot_setscene[depends] += "${PIXBUFCACHE_SYSROOT_DEPS}"
diff --git a/meta/classes/pkg_distribute.bbclass b/meta/classes/pkg_distribute.bbclass
new file mode 100644
index 0000000000..9f249a0dfe
--- /dev/null
+++ b/meta/classes/pkg_distribute.bbclass
@@ -0,0 +1,29 @@
1PKG_DISTRIBUTECOMMAND[func] = "1"
2python do_distribute_packages () {
3 cmd = d.getVar('PKG_DISTRIBUTECOMMAND', True)
4 if not cmd:
5 raise bb.build.FuncFailed("Unable to distribute packages, PKG_DISTRIBUTECOMMAND not defined")
6 bb.build.exec_func('PKG_DISTRIBUTECOMMAND', d)
7}
8
9addtask distribute_packages before do_build after do_fetch
10
11PKG_DIST_LOCAL ?= "symlink"
12PKG_DISTRIBUTEDIR ?= "${DEPLOY_DIR}/packages"
13
14PKG_DISTRIBUTECOMMAND () {
15 p=`dirname ${FILE}`
16 d=`basename $p`
17 mkdir -p ${PKG_DISTRIBUTEDIR}
18 case "${PKG_DIST_LOCAL}" in
19 copy)
20 # use this weird tar command to copy because we want to
21 # exclude the BitKeeper directories
22 test -e ${PKG_DISTRIBUTEDIR}/${d} || mkdir ${PKG_DISTRIBUTEDIR}/${d};
23 (cd ${p}; tar -c --exclude SCCS -f - . ) | tar -C ${PKG_DISTRIBUTEDIR}/${d} -xpf -
24 ;;
25 symlink)
26 ln -sf $p ${PKG_DISTRIBUTEDIR}/
27 ;;
28 esac
29}
diff --git a/meta/classes/pkg_metainfo.bbclass b/meta/classes/pkg_metainfo.bbclass
new file mode 100644
index 0000000000..80f6244fca
--- /dev/null
+++ b/meta/classes/pkg_metainfo.bbclass
@@ -0,0 +1,22 @@
1python do_pkg_write_metainfo () {
2 deploydir = d.getVar('DEPLOY_DIR', True)
3 if not deploydir:
4 bb.error("DEPLOY_DIR not defined, unable to write package info")
5 return
6
7 try:
8        infofile = open(os.path.join(deploydir, 'package-metainfo'), 'a')
9    except (IOError, OSError):
10        raise bb.build.FuncFailed("unable to open package-metainfo file for writing.")
11
12 name = d.getVar('PN', True)
13 version = d.getVar('PV', True)
14 desc = d.getVar('DESCRIPTION', True)
15 page = d.getVar('HOMEPAGE', True)
16 lic = d.getVar('LICENSE', True)
17
18 infofile.write("|| "+ name +" || "+ version + " || "+ desc +" || "+ page +" || "+ lic + " ||\n" )
19 infofile.close()
20}
21
22addtask pkg_write_metainfo after do_package before do_build
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
new file mode 100644
index 0000000000..ad1f84f506
--- /dev/null
+++ b/meta/classes/pkgconfig.bbclass
@@ -0,0 +1,2 @@
1DEPENDS_prepend = "pkgconfig-native "
2
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
new file mode 100644
index 0000000000..f64a911b72
--- /dev/null
+++ b/meta/classes/populate_sdk.bbclass
@@ -0,0 +1,7 @@
1# The majority of populate_sdk is located in populate_sdk_base
2# This chunk simply facilitates compatibility with SDK-only recipes.
3
4inherit populate_sdk_base
5
6addtask populate_sdk after do_install before do_build
7
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
new file mode 100644
index 0000000000..b7ea85159c
--- /dev/null
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -0,0 +1,337 @@
1inherit meta toolchain-scripts
2inherit populate_sdk_${IMAGE_PKGTYPE}
3
4SDK_DIR = "${WORKDIR}/sdk"
5SDK_OUTPUT = "${SDK_DIR}/image"
6SDK_DEPLOY = "${TMPDIR}/deploy/sdk"
7
8SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
9
10TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
11TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
12TOOLCHAIN_TARGET_TASK ?= "packagegroup-core-standalone-sdk-target packagegroup-core-standalone-sdk-target-dbg"
13TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
14TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
15
16SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
17SDK_DEPENDS = "virtual/fakeroot-native sed-native"
18
19# We want REAL_MULTIMACH_TARGET_SYS to point to TUNE_PKGARCH, not PACKAGE_ARCH,
20# as the latter could be set to MACHINE_ARCH
21REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
22
23PID = "${@os.getpid()}"
24
25EXCLUDE_FROM_WORLD = "1"
26
27SDK_PACKAGING_FUNC ?= "create_shar"
28
29fakeroot python do_populate_sdk() {
30 pn = d.getVar('PN', True)
31 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
32
33 bb.build.exec_func("populate_sdk_image", d)
34
35 # Handle multilibs in the SDK environment, siteconfig, etc files...
36 localdata = bb.data.createCopy(d)
37
38 # make sure we only use the WORKDIR value from 'd', or it can change
39 localdata.setVar('WORKDIR', d.getVar('WORKDIR', True))
40
41 # make sure we only use the SDKTARGETSYSROOT value from 'd'
42 localdata.setVar('SDKTARGETSYSROOT', d.getVar('SDKTARGETSYSROOT', True))
43
44 # Process DEFAULTTUNE
45 bb.build.exec_func("create_sdk_files", localdata)
46
47 variants = d.getVar("MULTILIB_VARIANTS", True) or ""
48 for item in variants.split():
49 # Load overrides from 'd' to avoid having to reset the value...
50 overrides = d.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
51 localdata.setVar("OVERRIDES", overrides)
52 bb.data.update_data(localdata)
53 bb.build.exec_func("create_sdk_files", localdata)
54
55 bb.build.exec_func("tar_sdk", d)
56
57 bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d)
58}
59
60fakeroot populate_sdk_image() {
61 rm -rf ${SDK_OUTPUT}
62 mkdir -p ${SDK_OUTPUT}
63
64 # populate_sdk_<image> is required to construct two images:
65 # SDK_ARCH-nativesdk - contains the cross compiler and associated tooling
66 # target - contains a target rootfs configured for the SDK usage
67 #
68# the output of populate_sdk_<image> should end up in ${SDK_OUTPUT}; it is
69# made up of:
70 # ${SDK_OUTPUT}/<sdk_arch-nativesdk pkgs>
71 # ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/<target pkgs>
72
73 populate_sdk_${IMAGE_PKGTYPE}
74
75 # Don't ship any libGL in the SDK
76 rm -rf ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/libGL*
77
78 # Can copy pstage files here
79 # target_pkgs=`cat ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/var/lib/opkg/status | grep Package: | cut -f 2 -d ' '`
80
81 # Fix or remove broken .la files
82 #rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}/lib/*.la
83 rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/*.la
84
85	# Link the ld.so.cache file into the host's filesystem
86 ln -s /etc/ld.so.cache ${SDK_OUTPUT}/${SDKPATHNATIVE}/etc/ld.so.cache
87
88 ${SDK_POSTPROCESS_COMMAND}
89}
90
91fakeroot create_sdk_files() {
92 # Setup site file for external use
93 toolchain_create_sdk_siteconfig ${SDK_OUTPUT}/${SDKPATH}/site-config-${REAL_MULTIMACH_TARGET_SYS}
94
95 toolchain_create_sdk_env_script ${SDK_OUTPUT}/${SDKPATH}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
96
97 # Add version information
98 toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${REAL_MULTIMACH_TARGET_SYS}
99
100 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
101
102 # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
103 # Escape special characters like '+' and '.' in the SDKPATH
104 escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
105 sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
106}
107
108SDKTAROPTS = "--owner=root --group=root -j"
109
110fakeroot tar_sdk() {
111 # Package it up
112 mkdir -p ${SDK_DEPLOY}
113 cd ${SDK_OUTPUT}/${SDKPATH}
114 tar ${SDKTAROPTS} -c --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 .
115}
116
117fakeroot create_shar() {
118 cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
119#!/bin/bash
120
121INST_ARCH=$(uname -m | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
122SDK_ARCH=$(echo ${SDK_ARCH} | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
123
124if [ "$INST_ARCH" != "$SDK_ARCH" ]; then
125 # Allow for installation of ix86 SDK on x86_64 host
126 if [ "$INST_ARCH" != x86_64 -o "$SDK_ARCH" != ix86 ]; then
127 echo "Error: Installation machine not supported!"
128 exit 1
129 fi
130fi
131
132DEFAULT_INSTALL_DIR="${SDKPATH}"
133SUDO_EXEC=""
134target_sdk_dir=""
135answer=""
136relocate=1
137savescripts=0
138verbose=0
139while getopts ":yd:DRS" OPT; do
140 case $OPT in
141 y)
142 answer="Y"
143 [ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
144 ;;
145 d)
146 target_sdk_dir=$OPTARG
147 ;;
148 D)
149 verbose=1
150 ;;
151 R)
152 relocate=0
153 savescripts=1
154 ;;
155 S)
156 savescripts=1
157 ;;
158 *)
159 echo "Usage: $(basename $0) [-y] [-d <dir>]"
160 echo " -y Automatic yes to all prompts"
161 echo " -d <dir> Install the SDK to <dir>"
162 echo "======== Advanced DEBUGGING ONLY OPTIONS ========"
163 echo " -S Save relocation scripts"
164 echo " -R Do not relocate executables"
165 echo " -D use set -x to see what is going on"
166 exit 1
167 ;;
168 esac
169done
170
171if [ $verbose = 1 ] ; then
172 set -x
173fi
174
175printf "Enter target directory for SDK (default: $DEFAULT_INSTALL_DIR): "
176if [ "$target_sdk_dir" = "" ]; then
177 read target_sdk_dir
178 [ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
179else
180 echo "$target_sdk_dir"
181fi
182
183eval target_sdk_dir=$(echo "$target_sdk_dir"|sed 's/ /\\ /g')
184if [ -d "$target_sdk_dir" ]; then
185 target_sdk_dir=$(cd "$target_sdk_dir"; pwd)
186else
187 target_sdk_dir=$(readlink -m "$target_sdk_dir")
188fi
189
190if [ -n "$(echo $target_sdk_dir|grep ' ')" ]; then
191 echo "The target directory path ($target_sdk_dir) contains spaces. Abort!"
192 exit 1
193fi
194
195if [ -e "$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}" ]; then
196	echo "The directory \"$target_sdk_dir\" already contains an SDK for this architecture."
197 printf "If you continue, existing files will be overwritten! Proceed[y/N]?"
198
199 default_answer="n"
200else
201 printf "You are about to install the SDK to \"$target_sdk_dir\". Proceed[Y/n]?"
202
203 default_answer="y"
204fi
205
206if [ "$answer" = "" ]; then
207 read answer
208 [ "$answer" = "" ] && answer="$default_answer"
209else
210 echo $answer
211fi
212
213if [ "$answer" != "Y" -a "$answer" != "y" ]; then
214 echo "Installation aborted!"
215 exit 1
216fi
217
218# Try to create the directory (this will not succeed if the user doesn't have rights)
219mkdir -p $target_sdk_dir >/dev/null 2>&1
220
221# if we don't have the right to access the dir, gain it via sudo
222if [ ! -x $target_sdk_dir -o ! -w $target_sdk_dir -o ! -r $target_sdk_dir ]; then
223 SUDO_EXEC=$(which "sudo")
224 if [ -z $SUDO_EXEC ]; then
225 echo "No command 'sudo' found, please install sudo first. Abort!"
226 exit 1
227 fi
228
229    # test that sudo can gain root rights
230 $SUDO_EXEC pwd >/dev/null 2>&1
231 [ $? -ne 0 ] && echo "Sorry, you are not allowed to execute as root." && exit 1
232
233 # now that we have sudo rights, create the directory
234 $SUDO_EXEC mkdir -p $target_sdk_dir >/dev/null 2>&1
235fi
236
237payload_offset=$(($(grep -na -m1 "^MARKER:$" $0|cut -d':' -f1) + 1))
238
239printf "Extracting SDK..."
240tail -n +$payload_offset $0| $SUDO_EXEC tar xj -C $target_sdk_dir
241echo "done"
242
243printf "Setting it up..."
244# fix environment paths
245for env_setup_script in `ls $target_sdk_dir/environment-setup-*`; do
246 $SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g" -i $env_setup_script
247done
248
249# fix dynamic loader paths in all ELF SDK binaries
250native_sysroot=$($SUDO_EXEC cat $env_setup_script |grep 'OECORE_NATIVE_SYSROOT='|cut -d'=' -f2|tr -d '"')
251dl_path=$($SUDO_EXEC find $native_sysroot/lib -name "ld-linux*")
252if [ "$dl_path" = "" ] ; then
253 echo "SDK could not be set up. Relocate script unable to find ld-linux.so. Abort!"
254 exit 1
255fi
256executable_files=$($SUDO_EXEC find $native_sysroot -type f -perm /111)
257
258tdir=`mktemp -d`
259if [ x$tdir = x ] ; then
260 echo "SDK relocate failed, could not create a temporary directory"
261 exit 1
262fi
263echo "#!/bin/bash" > $tdir/relocate_sdk.sh
264echo exec ${env_setup_script%/*}/relocate_sdk.py $target_sdk_dir $dl_path $executable_files >> $tdir/relocate_sdk.sh
265$SUDO_EXEC mv $tdir/relocate_sdk.sh ${env_setup_script%/*}/relocate_sdk.sh
266$SUDO_EXEC chmod 755 ${env_setup_script%/*}/relocate_sdk.sh
267rm -rf $tdir
268if [ $relocate = 1 ] ; then
269 $SUDO_EXEC ${env_setup_script%/*}/relocate_sdk.sh
270 if [ $? -ne 0 ]; then
271 echo "SDK could not be set up. Relocate script failed. Abort!"
272 exit 1
273 fi
274fi
275
276# replace ${SDKPATH} with the new prefix in all text files: configs/scripts/etc
277$SUDO_EXEC find $native_sysroot -type f -exec file '{}' \;|grep ":.*\(ASCII\|script\|source\).*text"|cut -d':' -f1|$SUDO_EXEC xargs sed -i -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g"
278
279# change all symlinks pointing to ${SDKPATH}
280for l in $($SUDO_EXEC find $native_sysroot -type l); do
281 $SUDO_EXEC ln -sfn $(readlink $l|$SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:") $l
282done
283
284# find out all perl scripts in $native_sysroot and modify them replacing the
285# host perl with SDK perl.
286for perl_script in $($SUDO_EXEC grep "^#!.*perl" -rl $native_sysroot); do
287 $SUDO_EXEC sed -i -e "s:^#! */usr/bin/perl.*:#! /usr/bin/env perl:g" -e \
288 "s: /usr/bin/perl: /usr/bin/env perl:g" $perl_script
289done
290
291echo done
292
293# delete the relocation scripts, so that the user is forced to re-run the
294# installer if they want another location for the SDK
295if [ $savescripts = 0 ] ; then
296 $SUDO_EXEC rm ${env_setup_script%/*}/relocate_sdk.py ${env_setup_script%/*}/relocate_sdk.sh
297fi
298
299echo "SDK has been successfully set up and is ready to be used."
300
301exit 0
302
303MARKER:
304EOF
305 # add execution permission
306 chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
307
308 # append the SDK tarball
309 cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
310
311 # delete the old tarball, we don't need it anymore
312 rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2
313}
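The header/MARKER/payload layout produced above is the classic self-extracting archive trick; a minimal reproduction (hypothetical file names) looks like this:

    import subprocess

    # Concatenate a shell header, a MARKER line and a binary payload.
    # The header locates the payload by grepping for its own MARKER line.
    header = (b"#!/bin/sh\n"
              b"offset=$(($(grep -na -m1 '^MARKER:$' $0 | cut -d: -f1) + 1))\n"
              b"tail -n +$offset $0 | tar xj -C ${1:-.}\n"
              b"exit 0\n"
              b"MARKER:\n")
    with open("installer.sh", "wb") as out:
        out.write(header)
        with open("payload.tar.bz2", "rb") as payload:
            out.write(payload.read())
    subprocess.call(["chmod", "+x", "installer.sh"])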
314
315populate_sdk_log_check() {
316 for target in $*
317 do
318 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
319
320 echo "log_check: Using $lf_path as logfile"
321
322 if test -e "$lf_path"
323 then
324 ${IMAGE_PKGTYPE}_log_check $target $lf_path
325 else
326 echo "Cannot find logfile [$lf_path]"
327 fi
328 echo "Logfile is clean"
329 done
330}
331
332do_populate_sdk[dirs] = "${TOPDIR}"
333do_populate_sdk[nostamp] = "1"
334do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])}"
335do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
336do_populate_sdk[recrdeptask] = "do_package_write"
337addtask populate_sdk
diff --git a/meta/classes/populate_sdk_deb.bbclass b/meta/classes/populate_sdk_deb.bbclass
new file mode 100644
index 0000000000..ebb842ba86
--- /dev/null
+++ b/meta/classes/populate_sdk_deb.bbclass
@@ -0,0 +1,95 @@
1do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
2do_populate_sdk[recrdeptask] += "do_package_write_deb"
3
4
5DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', True), "i386"]\
6 [d.getVar('SDK_ARCH', True) in \
7 ["x86", "i486", "i586", "i686", "pentium"]]}"
8
9do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
10
11populate_sdk_post_deb () {
12
13 local target_rootfs=$1
14
15 mkdir -p ${target_rootfs}/etc
16 tar -cf - -C ${STAGING_ETCDIR_NATIVE} -p apt | tar -xf - -C ${target_rootfs}/etc
17}
18
19populate_sdk_deb () {
20
21 # update index
22 package_update_index_deb
23
24 ## install target ##
25 # This needs to work in the same way as rootfs_deb.bbclass
26 echo "Installing TARGET packages"
27
28 mkdir -p ${IMAGE_ROOTFS}/var/dpkg/alternatives
29
30 export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
31 export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
32 export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
33 export INSTALL_PACKAGES_NORMAL_DEB="${TOOLCHAIN_TARGET_TASK}"
34 export INSTALL_PACKAGES_ATTEMPTONLY_DEB="${TOOLCHAIN_TARGET_TASK_ATTEMPTONLY}"
35 export PACKAGES_LINGUAS_DEB=""
36 export INSTALL_TASK_DEB="populate_sdk-target"
37 export INTERCEPT_DIR=${WORKDIR}/intercept_scripts
38 export NATIVE_ROOT=${STAGING_DIR_NATIVE}
39
40 package_install_internal_deb
41
42 ${POPULATE_SDK_POST_TARGET_COMMAND}
43
44 populate_sdk_post_deb ${INSTALL_ROOTFS_DEB}
45
46 populate_sdk_log_check populate_sdk
47
48 ## install nativesdk ##
49 echo "Installing NATIVESDK packages"
50 export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}"
51 export INSTALL_BASEARCH_DEB="${DEB_SDK_ARCH}"
52 export INSTALL_ARCHS_DEB="${SDK_PACKAGE_ARCHS}"
53 export INSTALL_PACKAGES_NORMAL_DEB="${TOOLCHAIN_HOST_TASK}"
54 export INSTALL_PACKAGES_ATTEMPTONLY_DEB="${TOOLCHAIN_HOST_TASK_ATTEMPTONLY}"
55 export PACKAGES_LINGUAS_DEB=""
56 export INSTALL_TASK_DEB="populate_sdk-nativesdk"
57
58 package_install_internal_deb
59 ${POPULATE_SDK_POST_HOST_COMMAND}
60 populate_sdk_post_deb ${SDK_OUTPUT}/${SDKPATHNATIVE}
61
62	# move the remaining dpkg state
63 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/lib/dpkg
64 mv ${SDK_OUTPUT}/var/lib/dpkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/lib/dpkg
65 rm -rf ${SDK_OUTPUT}/var
66
67 populate_sdk_log_check populate_sdk
68}
69
70# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
71DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
72
73list_installed_packages() {
74 if [ "$1" = "arch" ] ; then
75 # Here we want the PACKAGE_ARCH not the deb architecture
76 ${DPKG_QUERY_COMMAND} -W -f='${Package} ${PackageArch}\n'
77 elif [ "$1" = "file" ] ; then
78 ${DPKG_QUERY_COMMAND} -W -f='${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n' | while read pkg pkgfile pkgarch
79 do
80 fullpath=`find ${DEPLOY_DIR_DEB} -name "$pkgfile" || true`
81 if [ "$fullpath" = "" ] ; then
82 echo "$pkg $pkgfile $pkgarch"
83 else
84 echo "$pkg $fullpath $pkgarch"
85 fi
86 done
87 else
88 ${DPKG_QUERY_COMMAND} -W -f='${Package}\n'
89 fi
90}
91
92rootfs_list_installed_depends() {
93 # Cheat here a little bit by using the opkg query helper util
94 ${DPKG_QUERY_COMMAND} -W -f='Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n' | opkg-query-helper.py
95}
diff --git a/meta/classes/populate_sdk_ipk.bbclass b/meta/classes/populate_sdk_ipk.bbclass
new file mode 100644
index 0000000000..04c71af42e
--- /dev/null
+++ b/meta/classes/populate_sdk_ipk.bbclass
@@ -0,0 +1,80 @@
1do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
2do_populate_sdk[recrdeptask] += "do_package_write_ipk"
3
4do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
5
6populate_sdk_ipk() {
7
8 rm -f ${IPKGCONF_TARGET}
9 touch ${IPKGCONF_TARGET}
10 rm -f ${IPKGCONF_SDK}
11 touch ${IPKGCONF_SDK}
12
13 package_update_index_ipk
14 package_generate_ipkg_conf
15
16 export INSTALL_PACKAGES_LINGUAS_IPK=""
17 export INSTALL_TASK_IPK="populate_sdk"
18
19 #install target
20 export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
21 export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
22 export INSTALL_PACKAGES_IPK="${TOOLCHAIN_TARGET_TASK}"
23 export INSTALL_PACKAGES_ATTEMPTONLY_IPK="${TOOLCHAIN_TARGET_TASK_ATTEMPTONLY}"
24
25 export D=${INSTALL_ROOTFS_IPK}
26 export OFFLINE_ROOT=${INSTALL_ROOTFS_IPK}
27 export IPKG_OFFLINE_ROOT=${INSTALL_ROOTFS_IPK}
28 export OPKG_OFFLINE_ROOT=${IPKG_OFFLINE_ROOT}
29 export INTERCEPT_DIR=${WORKDIR}/intercept_scripts
30 export NATIVE_ROOT=${STAGING_DIR_NATIVE}
31
32 package_install_internal_ipk
33
34 ${POPULATE_SDK_POST_TARGET_COMMAND}
35
36 #install host
37 export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}"
38 export INSTALL_CONF_IPK="${IPKGCONF_SDK}"
39 export INSTALL_PACKAGES_IPK="${TOOLCHAIN_HOST_TASK}"
40 export INSTALL_PACKAGES_ATTEMPTONLY_IPK="${TOOLCHAIN_HOST_TASK_ATTEMPTONLY}"
41
42 package_install_internal_ipk
43
44 ${POPULATE_SDK_POST_HOST_COMMAND}
45
46 #post clean up
47 install -d ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}
48 install -m 0644 ${IPKGCONF_TARGET} ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}/
49
50 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
51 install -m 0644 ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
52
53 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg
54 mv ${SDK_OUTPUT}/var/lib/opkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg/
55 rm -Rf ${SDK_OUTPUT}/var
56
57 populate_sdk_log_check populate_sdk
58}
59
60list_installed_packages() {
61 if [ "$1" = "arch" ] ; then
62 opkg-cl ${OPKG_ARGS} status | opkg-query-helper.py -a
63 elif [ "$1" = "file" ] ; then
64 opkg-cl ${OPKG_ARGS} status | opkg-query-helper.py -f | while read pkg pkgfile pkgarch
65 do
66 fullpath=`find ${DEPLOY_DIR_IPK} -name "$pkgfile" || true`
67 if [ "$fullpath" = "" ] ; then
68 echo "$pkg $pkgfile $pkgarch"
69 else
70 echo "$pkg $fullpath $pkgarch"
71 fi
72 done
73 else
74 opkg-cl ${OPKG_ARGS} list_installed | awk '{ print $1 }'
75 fi
76}
77
78rootfs_list_installed_depends() {
79 opkg-cl ${OPKG_ARGS} status | opkg-query-helper.py
80}
diff --git a/meta/classes/populate_sdk_rpm.bbclass b/meta/classes/populate_sdk_rpm.bbclass
new file mode 100644
index 0000000000..b0105931a1
--- /dev/null
+++ b/meta/classes/populate_sdk_rpm.bbclass
@@ -0,0 +1,172 @@
1# Smart is python based, so be sure python-native is available to us.
2EXTRANATIVEPATH += "python-native"
3
4do_populate_sdk[depends] += "rpm-native:do_populate_sysroot"
5do_populate_sdk[depends] += "rpmresolve-native:do_populate_sysroot"
6do_populate_sdk[depends] += "python-smartpm-native:do_populate_sysroot"
7
8# Needed for update-alternatives
9do_populate_sdk[depends] += "opkg-native:do_populate_sysroot"
10
11# Creating the repo info in do_rootfs
12do_populate_sdk[depends] += "createrepo-native:do_populate_sysroot"
13
14do_populate_sdk[recrdeptask] += "do_package_write_rpm"
15
16rpmlibdir = "/var/lib/rpm"
17RPMOPTS="--dbpath ${rpmlibdir}"
18RPM="rpm ${RPMOPTS}"
19
20do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
21
22populate_sdk_post_rpm () {
23
24 local target_rootfs=$1
25
26 # remove lock files
27 rm -f ${target_rootfs}/__db.*
28
29 # Remove all remaining resolver files
30 rm -rf ${target_rootfs}/install
31 rm -rf ${target_rootfs}/var/lib/smart
32}
33
34populate_sdk_rpm () {
35
36 package_update_index_rpm
37
38 ## install target ##
39 # This needs to work in the same way as rootfs_rpm.bbclass!
40 #
41 export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
42 export INSTALL_PLATFORM_RPM="$(echo ${TARGET_ARCH} | tr - _)${TARGET_VENDOR}-${TARGET_OS}"
43 export INSTALL_PACKAGES_RPM="${TOOLCHAIN_TARGET_TASK}"
44 export INSTALL_PACKAGES_ATTEMPTONLY_RPM="$(echo '${TOOLCHAIN_TARGET_TASK_ATTEMPTONLY}' | sed 's/ *$//g')"
45 export INSTALL_PACKAGES_LINGUAS_RPM=""
46 # We don't need any of these runtime items for the SDK, so
47 # just make the system assume they exist.
48 export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig"
49 export INSTALL_TASK_RPM="populate_sdk-target"
50 export INSTALL_COMPLEMENTARY_RPM=""
51 export INTERCEPT_DIR=${WORKDIR}/intercept_scripts
52 export NATIVE_ROOT=${STAGING_DIR_NATIVE}
53
54 # Setup base system configuration
55 mkdir -p ${INSTALL_ROOTFS_RPM}/etc/rpm/
56
57	# List must be in most preferred to least preferred order
58 default_extra_rpm=""
59 INSTALL_PLATFORM_EXTRA_RPM=""
60 for os in ${MULTILIB_OS_LIST} ; do
61 old_IFS="$IFS"
62 IFS=":"
63 set -- $os
64 IFS="$old_IFS"
65 mlib=$1
66 mlib_os=$2
67 for prefix in ${MULTILIB_PREFIX_LIST} ; do
68 old_IFS="$IFS"
69 IFS=":"
70 set -- $prefix
71 IFS="$old_IFS"
72 if [ "$mlib" != "$1" ]; then
73 continue
74 fi
75 shift #remove mlib
76 while [ -n "$1" ]; do
77 platform="$(echo $1 | tr - _)-.*-$mlib_os"
78 if [ "$mlib" = "${BBEXTENDVARIANT}" ]; then
79 default_extra_rpm="$default_extra_rpm $platform"
80 else
81 INSTALL_PLATFORM_EXTRA_RPM="$INSTALL_PLATFORM_EXTRA_RPM $platform"
82 fi
83 shift
84 done
85 done
86 done
87 if [ -n "$default_extra_rpm" ]; then
88 INSTALL_PLATFORM_EXTRA_RPM="$default_extra_rpm $INSTALL_PLATFORM_EXTRA_RPM"
89 fi
90 export INSTALL_PLATFORM_EXTRA_RPM
91
92 package_install_internal_rpm
93 ${POPULATE_SDK_POST_TARGET_COMMAND}
94 populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
95
96 ## install nativesdk ##
97 echo "Installing NATIVESDK packages"
98 export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}"
99 export INSTALL_PLATFORM_RPM="$(echo ${TARGET_ARCH} | tr - _)${SDK_VENDOR}-${SDK_OS}"
100 export INSTALL_PACKAGES_RPM="${TOOLCHAIN_HOST_TASK}"
101 export INSTALL_PACKAGES_ATTEMPTONLY_RPM="$(echo '${TOOLCHAIN_HOST_TASK_ATTEMPTONLY}' | sed 's/ *$//g')"
102 export INSTALL_PACKAGES_LINGUAS_RPM=""
103 export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig libGL.so()(64bit) libGL.so"
104 export INSTALL_TASK_RPM="populate_sdk_rpm-nativesdk"
105 export INSTALL_COMPLEMENTARY_RPM=""
106
107	# List must be in most preferred to least preferred order
108 INSTALL_PLATFORM_EXTRA_RPM=""
109 for each_arch in ${SDK_PACKAGE_ARCHS} ; do
110 platform="$(echo $each_arch | tr - _)-.*-${SDK_OS}"
111 INSTALL_PLATFORM_EXTRA_RPM="$platform $INSTALL_PLATFORM_EXTRA_RPM"
112 done
113 export INSTALL_PLATFORM_EXTRA_RPM
114
115 package_install_internal_rpm --sdk
116 ${POPULATE_SDK_POST_HOST_COMMAND}
117 populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
118
119 # move host RPM library data
120 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm
121 mv ${SDK_OUTPUT}${rpmlibdir}/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm/
122 rm -Rf ${SDK_OUTPUT}/var
123
124 install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
125 mv ${SDK_OUTPUT}/etc/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
126 rm -rf ${SDK_OUTPUT}/etc
127
128 populate_sdk_log_check populate_sdk
129}
130
131python () {
132 # The following code should be kept in sync w/ the rootfs_rpm version.
133
134 # package_arch order is reversed. This ensures the -best- match is listed first!
135 package_archs = d.getVar("PACKAGE_ARCHS", True) or ""
136 package_archs = ":".join(package_archs.split()[::-1])
137 package_os = d.getVar("TARGET_OS", True) or ""
138 ml_prefix_list = "%s:%s" % ('default', package_archs)
139 ml_os_list = "%s:%s" % ('default', package_os)
140 multilibs = d.getVar('MULTILIBS', True) or ""
141 for ext in multilibs.split():
142 eext = ext.split(':')
143 if len(eext) > 1 and eext[0] == 'multilib':
144 localdata = bb.data.createCopy(d)
145 default_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + eext[1], False)
146 if default_tune:
147 localdata.setVar("DEFAULTTUNE", default_tune)
148 bb.data.update_data(localdata)
149 package_archs = localdata.getVar("PACKAGE_ARCHS", True) or ""
150 package_archs = ":".join([i in "all noarch any".split() and i or eext[1]+"_"+i for i in package_archs.split()][::-1])
151 package_os = localdata.getVar("TARGET_OS", True) or ""
152 ml_prefix_list += " %s:%s" % (eext[1], package_archs)
153 ml_os_list += " %s:%s" % (eext[1], package_os)
154 d.setVar('MULTILIB_PREFIX_LIST', ml_prefix_list)
155 d.setVar('MULTILIB_OS_LIST', ml_os_list)
156}
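As an illustration of the list construction above, with hypothetical values:

    # Hypothetical PACKAGE_ARCHS for a qemuarm build
    package_archs = "all any noarch arm armv4 armv5te qemuarm"
    # Reverse so the most specific (best) arch comes first, then join
    best_first = ":".join(package_archs.split()[::-1])
    ml_prefix_list = "%s:%s" % ("default", best_first)
    # ml_prefix_list == "default:qemuarm:armv5te:armv4:arm:noarch:any:all"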
157
158RPM_QUERY_CMD = '${RPM} --root $INSTALL_ROOTFS_RPM -D "_dbpath ${rpmlibdir}"'
159
160list_installed_packages() {
161 if [ "$1" = "arch" ]; then
162 ${RPM_QUERY_CMD} -qa --qf "[%{NAME} %{ARCH}\n]" | translate_smart_to_oe arch
163 elif [ "$1" = "file" ]; then
164 ${RPM_QUERY_CMD} -qa --qf "[%{NAME} %{ARCH} %{PACKAGEORIGIN}\n]" | translate_smart_to_oe file
165 else
166 ${RPM_QUERY_CMD} -qa --qf "[%{NAME} %{ARCH}\n]" | translate_smart_to_oe
167 fi
168}
169
170rootfs_list_installed_depends() {
171 rpmresolve -t $INSTALL_ROOTFS_RPM/${rpmlibdir}
172}
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
new file mode 100644
index 0000000000..5a1cb33c6a
--- /dev/null
+++ b/meta/classes/prexport.bbclass
@@ -0,0 +1,58 @@
1PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
2PRSERV_DUMPOPT_PKGARCH = ""
3PRSERV_DUMPOPT_CHECKSUM = ""
4PRSERV_DUMPOPT_COL = "0"
5
6PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
7PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
8
9python prexport_handler () {
10 import bb.event
11 if not e.data:
12 return
13
14 if isinstance(e, bb.event.RecipeParsed):
15 import oe.prservice
16 #get all PR values for the current PRAUTOINX
17 ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
18 ver = ver.replace('%','-')
19 retval = oe.prservice.prserv_dump_db(e.data)
20 if not retval:
21 bb.fatal("prexport_handler: export failed!")
22 (metainfo, datainfo) = retval
23 if not datainfo:
24 bb.warn("prexport_handler: No AUTOPR values found for %s" % ver)
25 return
26 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
27 if 'AUTOINC' in ver:
28 import re
29 srcpv = bb.fetch2.get_srcrev(e.data)
30 base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)]
31 e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver)
32 retval = oe.prservice.prserv_dump_db(e.data)
33 if not retval:
34 bb.fatal("prexport_handler: export failed!")
35 (metainfo, datainfo) = retval
36 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
37 elif isinstance(e, bb.event.ParseStarted):
38 import bb.utils
39 import oe.prservice
40 oe.prservice.prserv_check_avail(e.data)
41 #remove dumpfile
42 bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
43 elif isinstance(e, bb.event.ParseCompleted):
44 import oe.prservice
45 #dump meta info of tables
46 d = e.data.createCopy()
47 d.setVar('PRSERV_DUMPOPT_COL', "1")
48 retval = oe.prservice.prserv_dump_db(d)
49 if not retval:
50 bb.error("prexport_handler: export failed!")
51 return
52 (metainfo, datainfo) = retval
53 oe.prservice.prserv_export_tofile(d, metainfo, None, True)
54
55}
56
57addhandler prexport_handler
58prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted"
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
new file mode 100644
index 0000000000..8ed45f03f0
--- /dev/null
+++ b/meta/classes/primport.bbclass
@@ -0,0 +1,21 @@
1python primport_handler () {
2 import bb.event
3 if not e.data:
4 return
5
6 if isinstance(e, bb.event.ParseCompleted):
7 import oe.prservice
8 #import all exported AUTOPR values
9 imported = oe.prservice.prserv_import_db(e.data)
10 if imported is None:
11 bb.fatal("import failed!")
12
13 for (version, pkgarch, checksum, value) in imported:
14 bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
15 elif isinstance(e, bb.event.ParseStarted):
16 import oe.prservice
17 oe.prservice.prserv_check_avail(e.data)
18}
19
20addhandler primport_handler
21primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted"
diff --git a/meta/classes/prserv.bbclass b/meta/classes/prserv.bbclass
new file mode 100644
index 0000000000..b440d863ef
--- /dev/null
+++ b/meta/classes/prserv.bbclass
@@ -0,0 +1,33 @@
1def prserv_get_pr_auto(d):
2 import oe.prservice
3 import re
4
5 pv = d.getVar("PV", True)
6 if not d.getVar('PRSERV_HOST', True):
7 if 'AUTOINC' in pv:
8 d.setVar("PKGV", pv.replace("AUTOINC", "0"))
9 bb.warn("Not using network based PR service")
10 return None
11
12 version = d.getVar("PRAUTOINX", True)
13 pkgarch = d.getVar("PACKAGE_ARCH", True)
14 checksum = d.getVar("BB_TASKHASH", True)
15
16 conn = d.getVar("__PRSERV_CONN", True)
17 if conn is None:
18 conn = oe.prservice.prserv_make_conn(d)
19 if conn is None:
20 return None
21
22 if "AUTOINC" in pv:
23 srcpv = bb.fetch2.get_srcrev(d)
24 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
25 value = conn.getPR(base_ver, pkgarch, srcpv)
26 d.setVar("PKGV", pv.replace("AUTOINC", str(value)))
27
28 if d.getVar('PRSERV_LOCKDOWN', True):
29 auto_rev = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
30 else:
31 auto_rev = conn.getPR(version, pkgarch, checksum)
32
33 return auto_rev
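To make the AUTOINC handling concrete, here is a worked sketch with hypothetical values:

    pv = "1.0+AUTOINC+1234abcd"                # hypothetical PV
    version = "foo-1.0+AUTOINC+1234abcd-r0"    # hypothetical PRAUTOINX
    srcpv = "1234abcd"                         # what bb.fetch2.get_srcrev() returned

    base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
    # base_ver == "AUTOINC-foo-1.0+AUTOINC+"; the PR server keeps a
    # monotonically increasing counter per (base_ver, pkgarch)
    value = 5                                  # hypothetical counter from conn.getPR()
    pkgv = pv.replace("AUTOINC", str(value))
    # pkgv == "1.0+5+1234abcd"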
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
new file mode 100644
index 0000000000..d67b4e659f
--- /dev/null
+++ b/meta/classes/ptest.bbclass
@@ -0,0 +1,59 @@
1# Ptest packages are built indirectly by a distro_feature,
2# no need for them to be a direct target of 'world'
3EXCLUDE_FROM_WORLD = "1"
4
5SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
6DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
7This package contains a test directory ${PTEST_PATH} for package test purposes."
8
9PTEST_PATH ?= "${libdir}/${PN}/ptest"
10FILES_${PN}-ptest = "${PTEST_PATH}"
11SECTION_${PN}-ptest = "devel"
12ALLOW_EMPTY_${PN}-ptest = "1"
13PTEST_ENABLED = "${@base_contains("DISTRO_FEATURES", "ptest", "1", "0", d)}"
14RDEPENDS_${PN}-ptest_virtclass-native = ""
15RDEPENDS_${PN}-ptest_virtclass-nativesdk = ""
16
17PACKAGES =+ "${@base_contains('DISTRO_FEATURES', 'ptest', '${PN}-ptest', '', d)}"
18
19do_configure_ptest() {
20 :
21}
22
23do_configure_ptest_base() {
24 if [ ${PTEST_ENABLED} = 1 ]; then
25 do_configure_ptest
26 fi
27}
28
29do_compile_ptest() {
30 :
31}
32
33do_compile_ptest_base() {
34 if [ ${PTEST_ENABLED} = 1 ]; then
35 do_compile_ptest
36 fi
37}
38
39do_install_ptest() {
40 :
41}
42
43do_install_ptest_base() {
44 if [ ${PTEST_ENABLED} = 1 ]; then
45 if [ -f ${WORKDIR}/run-ptest ]; then
46 install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
47 if grep -q install-ptest: Makefile; then
48 oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
49 fi
50 do_install_ptest
51 fi
52 fi
53}
54
55do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
56
57addtask configure_ptest_base after do_configure before do_compile
58addtask compile_ptest_base after do_compile before do_install
59addtask install_ptest_base after do_install before do_package
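A hypothetical recipe opting into ptest then only needs to ship a run-ptest script and, where the Makefile lacks an install-ptest target, its own do_install_ptest:

    inherit ptest

    SRC_URI += "file://run-ptest"

    do_install_ptest () {
    	install -m 0755 ${B}/tests/testsuite ${D}${PTEST_PATH}/
    }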
diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass
new file mode 100644
index 0000000000..0b6a33c2ed
--- /dev/null
+++ b/meta/classes/python-dir.bbclass
@@ -0,0 +1,3 @@
1PYTHON_BASEVERSION ?= "2.7"
2PYTHON_DIR = "python${PYTHON_BASEVERSION}"
3PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
new file mode 100644
index 0000000000..7886207d06
--- /dev/null
+++ b/meta/classes/pythonnative.bbclass
@@ -0,0 +1,3 @@
1PYTHON="${STAGING_BINDIR_NATIVE}/python-native/python"
2EXTRANATIVEPATH += "python-native"
3DEPENDS += " python-native "
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
new file mode 100644
index 0000000000..3d437b0e45
--- /dev/null
+++ b/meta/classes/qemu.bbclass
@@ -0,0 +1,35 @@
1#
2# This class contains functions for recipes that need QEMU or test for its
3# existence.
4#
5
6def qemu_target_binary(data):
7 target_arch = data.getVar("TARGET_ARCH", True)
8 if target_arch in ("i486", "i586", "i686"):
9 target_arch = "i386"
10 elif target_arch == "powerpc":
11 target_arch = "ppc"
12 elif target_arch == "powerpc64":
13 target_arch = "ppc64"
14
15 return "qemu-" + target_arch
16#
17# The following function returns a string containing the command needed to
18# run a certain binary through qemu. For example, when a postinstall
19# scriptlet must run at do_rootfs time and the postinstall is
20# architecture-dependent, we can run it through qemu. In the
21# postinstall scriptlet, we could use the following:
22#
23# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
24#
25def qemu_run_binary(data, rootfs_path, binary):
26 qemu_binary = qemu_target_binary(data)
27 if qemu_binary == "qemu-allarch":
28 qemu_binary = "qemuwrapper"
29
30 libdir = rootfs_path + data.getVar("libdir", False)
31 base_libdir = rootfs_path + data.getVar("base_libdir", False)
32
33 return "PSEUDO_UNLOAD=1 " + qemu_binary + " -L " + rootfs_path\
34 + " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\
35 + rootfs_path + binary
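For a hypothetical arm target with libdir /usr/lib and base_libdir /lib, the string returned by qemu_run_binary(d, '$D', '/usr/bin/test_app') comes out as:

    PSEUDO_UNLOAD=1 qemu-arm -L $D -E LD_LIBRARY_PATH=$D/usr/lib:$D/lib $D/usr/bin/test_app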
diff --git a/meta/classes/qmake2.bbclass b/meta/classes/qmake2.bbclass
new file mode 100644
index 0000000000..6e73ad2d1e
--- /dev/null
+++ b/meta/classes/qmake2.bbclass
@@ -0,0 +1,27 @@
1#
2# QMake variables for Qt4
3#
4inherit qmake_base
5
6DEPENDS_prepend = "qt4-tools-native "
7
8export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
9export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/qt4/mkspecs/qconfig.pri"
10export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
11export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34"
12export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4"
13export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4"
14export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4"
15export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4"
16export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2"
17export OE_QMAKE_LINK = "${CXX}"
18export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
19export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
20export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
21export OE_QMAKE_LIBS_QT = "qt"
22export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
23export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
24export OE_QMAKE_LCONVERT = "${STAGING_BINDIR_NATIVE}/lconvert4"
25export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
26export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"
27export OE_QMAKE_XMLPATTERNS = "${STAGING_BINDIR_NATIVE}/xmlpatterns4"
diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass
new file mode 100644
index 0000000000..86bbede260
--- /dev/null
+++ b/meta/classes/qmake_base.bbclass
@@ -0,0 +1,119 @@
1QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake"
2
3OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
4QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
5
6# We override this completely to eliminate the -e normally passed in
7EXTRA_OEMAKE = ""
8
9export OE_QMAKE_CC="${CC}"
10export OE_QMAKE_CFLAGS="${CFLAGS}"
11export OE_QMAKE_CXX="${CXX}"
12export OE_QMAKE_LDFLAGS="${LDFLAGS}"
13export OE_QMAKE_AR="${AR}"
14export OE_QMAKE_STRIP="echo"
15export OE_QMAKE_RPATH="-Wl,-rpath-link,"
16
17# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11
18
19oe_qmake_mkspecs () {
20 mkdir -p mkspecs/${OE_QMAKE_PLATFORM}
21 for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do
22 if [ -L $f ]; then
23 lnk=`readlink $f`
24 if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then
25 ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f`
26 else
27 cp $f mkspecs/${OE_QMAKE_PLATFORM}/
28 fi
29 else
30 cp $f mkspecs/${OE_QMAKE_PLATFORM}/
31 fi
32 done
33}
34
35do_generate_qt_config_file() {
36 export QT_CONF_PATH=${WORKDIR}/qt.conf
37 cat > ${WORKDIR}/qt.conf <<EOF
38[Paths]
39Prefix =
40Binaries = ${STAGING_BINDIR_NATIVE}
41Headers = ${STAGING_INCDIR}/qt4
42Plugins = ${STAGING_LIBDIR}/qt4/plugins/
43Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
44EOF
45}
46
47addtask generate_qt_config_file after do_patch before do_configure
48
49qmake_base_do_configure() {
50 case ${QMAKESPEC} in
51 *linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++|*linux-uclibceabi-oe-g++|*linux-gnuspe-oe-g++|*linux-uclibcspe-oe-g++|*linux-gnun32-oe-g++)
52 ;;
53 *-oe-g++)
54 die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
55 ;;
56 *)
57 bbnote Searching for qmake spec file
58 paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
59 paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
60
61 if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
62 paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-x86-g++ $paths"
63 fi
64 for i in $paths; do
65 if test -e $i; then
66 export QMAKESPEC=$i
67 break
68 fi
69 done
70 ;;
71 esac
72
73 bbnote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
74
75 if [ -z "${QMAKE_PROFILES}" ]; then
76 PROFILES="`ls *.pro`"
77 else
78 PROFILES="${QMAKE_PROFILES}"
79 fi
80
81 if [ -z "$PROFILES" ]; then
82 die "QMAKE_PROFILES not set and no profiles found in $PWD"
83 fi
84
85 if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
86 AFTER="-after"
87 QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
88 bbnote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
89 fi
90
91 if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
92 QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
93 bbnote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
94 fi
95
96 # Hack .pro files to use OE utilities
97 LCONVERT_NAME=$(basename ${OE_QMAKE_LCONVERT})
98 LRELEASE_NAME=$(basename ${OE_QMAKE_LRELEASE})
99 LUPDATE_NAME=$(basename ${OE_QMAKE_LUPDATE})
100 XMLPATTERNS_NAME=$(basename ${OE_QMAKE_XMLPATTERNS})
101 find -name '*.pro' \
102 -exec sed -i -e "s|\(=\s*.*\)/$LCONVERT_NAME|\1/lconvert|g" \
103 -e "s|\(=\s*.*\)/$LRELEASE_NAME|\1/lrelease|g" \
104 -e "s|\(=\s*.*\)/$LUPDATE_NAME|\1/lupdate|g" \
105 -e "s|\(=\s*.*\)/$XMLPATTERNS_NAME|\1/xmlpatterns|g" \
106 -e "s|\(=\s*.*\)/lconvert|\1/$LCONVERT_NAME|g" \
107 -e "s|\(=\s*.*\)/lrelease|\1/$LRELEASE_NAME|g" \
108 -e "s|\(=\s*.*\)/lupdate|\1/$LUPDATE_NAME|g" \
109 -e "s|\(=\s*.*\)/xmlpatterns|\1/$XMLPATTERNS_NAME|g" \
110 '{}' ';'
111
112#bbnote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
113 unset QMAKESPEC || true
114 ${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
115}
116
117EXPORT_FUNCTIONS do_configure
118
119addtask configure after do_unpack do_patch before do_compile
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
new file mode 100644
index 0000000000..850bb6a717
--- /dev/null
+++ b/meta/classes/qt4e.bbclass
@@ -0,0 +1,24 @@
1QT4EDEPENDS ?= "qt4-embedded "
2DEPENDS_prepend = "${QT4EDEPENDS}"
3
4inherit qmake2
5
6QT_BASE_NAME = "qt4-embedded"
7QT_DIR_NAME = "qtopia"
8QT_LIBINFIX = "E"
9# override variables set by qmake-base to compile Qt/Embedded apps
10#
11export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
12export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/qconfig.pri"
13export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
14export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
15export OE_QMAKE_LIBS_QT = "qt"
16export OE_QMAKE_LIBS_X11 = ""
17export OE_QMAKE_EXTRA_MODULES = "network"
18EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
19
20# Qt4 uses atomic instructions not supported in thumb mode
21ARM_INSTRUCTION_SET = "arm"
22
23# Qt4 can NOT be built on MIPS64 with a 64-bit userspace
24COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
new file mode 100644
index 0000000000..65d196afc6
--- /dev/null
+++ b/meta/classes/qt4x11.bbclass
@@ -0,0 +1,14 @@
1QT4DEPENDS ?= "qt4-x11 "
2DEPENDS_prepend = "${QT4DEPENDS}"
3
4inherit qmake2
5
6QT_BASE_NAME = "qt4"
7QT_DIR_NAME = "qt4"
8QT_LIBINFIX = ""
9
10# Qt4 uses atomic instructions not supported in thumb mode
11ARM_INSTRUCTION_SET = "arm"
12
13# Qt4 can NOT be built on MIPS64 with a 64-bit userspace
14COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
new file mode 100644
index 0000000000..5dd4624f40
--- /dev/null
+++ b/meta/classes/recipe_sanity.bbclass
@@ -0,0 +1,168 @@
1def __note(msg, d):
2 bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
3
4__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
5def bad_runtime_vars(cfgdata, d):
6 if bb.data.inherits_class("native", d) or \
7 bb.data.inherits_class("cross", d):
8 return
9
10 for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
11 val = d.getVar(var, 0)
12 if val and val != cfgdata.get(var):
13 __note("%s should be %s_${PN}" % (var, var), d)
14
15__recipe_sanity_reqvars = "DESCRIPTION"
16__recipe_sanity_reqdiffvars = ""
17def req_vars(cfgdata, d):
18 for var in d.getVar("__recipe_sanity_reqvars", True).split():
19 if not d.getVar(var, 0):
20 __note("%s should be set" % var, d)
21
22 for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
23 val = d.getVar(var, 0)
24 cfgval = cfgdata.get(var)
25
26 if not val:
27 __note("%s should be set" % var, d)
28 elif val == cfgval:
29 __note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
30
31def var_renames_overwrite(cfgdata, d):
32 renames = d.getVar("__recipe_sanity_renames", 0)
33 if renames:
34 for (key, newkey, oldvalue, newvalue) in renames:
35 if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
36 __note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
37
38def incorrect_nonempty_PACKAGES(cfgdata, d):
39 if bb.data.inherits_class("native", d) or \
40 bb.data.inherits_class("cross", d):
41 if d.getVar("PACKAGES", True):
42 return True
43
44def can_use_autotools_base(cfgdata, d):
45 cfg = d.getVar("do_configure", True)
46 if not bb.data.inherits_class("autotools", d):
47 return False
48
49 for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
50 if cfg.find(i) != -1:
51 return False
52
53 for clsfile in d.getVar("__inherit_cache", 0):
54 (base, _) = os.path.splitext(os.path.basename(clsfile))
55 if cfg.find("%s_do_configure" % base) != -1:
56 __note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
57
58 return True
59
60def can_delete_FILESPATH(cfgdata, d):
61 expected = cfgdata.get("FILESPATH")
62 #expected = "${@':'.join([os.path.normpath(os.path.join(fp, p, o)) for fp in d.getVar('FILESPATHBASE', True).split(':') for p in d.getVar('FILESPATHPKG', True).split(':') for o in (d.getVar('OVERRIDES', True) + ':').split(':') if os.path.exists(os.path.join(fp, p, o))])}:${FILESDIR}"
63 expectedpaths = d.expand(expected)
64 unexpanded = d.getVar("FILESPATH", 0)
65 filespath = d.getVar("FILESPATH", True).split(":")
66 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
67 for fp in filespath:
68 if not fp in expectedpaths:
69 # __note("Path %s in FILESPATH not in the expected paths %s" %
70 # (fp, expectedpaths), d)
71 return False
72 return expected != unexpanded
73
74def can_delete_FILESDIR(cfgdata, d):
75 expected = cfgdata.get("FILESDIR")
76 #expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
77 unexpanded = d.getVar("FILESDIR", 0)
78 if unexpanded is None:
79 return False
80
81 expanded = os.path.normpath(d.getVar("FILESDIR", True))
82 filespath = d.getVar("FILESPATH", True).split(":")
83 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
84
85 return unexpanded != expected and \
86 os.path.exists(expanded) and \
87 (expanded in filespath or
88 expanded == d.expand(expected))
89
90def can_delete_others(p, cfgdata, d):
91 for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
92 "SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
93 #for k in cfgdata:
94 unexpanded = d.getVar(k, 0)
95 cfgunexpanded = cfgdata.get(k)
96 if not cfgunexpanded:
97 continue
98
99 try:
100 expanded = d.getVar(k, True)
101 cfgexpanded = d.expand(cfgunexpanded)
102 except bb.fetch.ParameterError:
103 continue
104
105 if unexpanded != cfgunexpanded and \
106 cfgexpanded == expanded:
107 __note("candidate for removal of %s" % k, d)
108 bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
109 (p, cfgunexpanded, unexpanded, expanded))
110
111python do_recipe_sanity () {
112	# Identify the recipe as "PN PV PR" in the messages below
113	p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
114
115 sanitychecks = [
116 (can_delete_FILESDIR, "candidate for removal of FILESDIR"),
117 (can_delete_FILESPATH, "candidate for removal of FILESPATH"),
118 #(can_use_autotools_base, "candidate for use of autotools_base"),
119 (incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
120 ]
121 cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
122
123 for (func, msg) in sanitychecks:
124 if func(cfgdata, d):
125 __note(msg, d)
126
127 can_delete_others(p, cfgdata, d)
128 var_renames_overwrite(cfgdata, d)
129 req_vars(cfgdata, d)
130 bad_runtime_vars(cfgdata, d)
131}
132do_recipe_sanity[nostamp] = "1"
133addtask recipe_sanity
134
135do_recipe_sanity_all[nostamp] = "1"
136do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity_all do_recipe_sanity"
137do_recipe_sanity_all () {
138 :
139}
140addtask recipe_sanity_all after do_recipe_sanity
141
142python recipe_sanity_eh () {
143 d = e.data
144
145 cfgdata = {}
146 for k in d.keys():
147 #for k in ["S", "PR", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
148 # "SECTION"]:
149 cfgdata[k] = d.getVar(k, 0)
150
151 d.setVar("__recipe_sanity_cfgdata", cfgdata)
152 #d.setVar("__recipe_sanity_cfgdata", d)
153
154 # Sick, very sick..
155 from bb.data_smart import DataSmart
156 old = DataSmart.renameVar
157 def myrename(self, key, newkey):
158 oldvalue = self.getVar(newkey, 0)
159 old(self, key, newkey)
160 newvalue = self.getVar(newkey, 0)
161 if oldvalue:
162 renames = self.getVar("__recipe_sanity_renames", 0) or set()
163 renames.add((key, newkey, oldvalue, newvalue))
164 self.setVar("__recipe_sanity_renames", renames)
165 DataSmart.renameVar = myrename
166}
167addhandler recipe_sanity_eh
168recipe_sanity_eh[eventmask] = "bb.event.ConfigParsed"
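Since do_recipe_sanity is a nostamp task, it can be re-run at will once the class is enabled. A usage sketch (the recipe and image names are only examples):

    # Enable the class, then run the checks on one recipe or a whole tree
    echo 'INHERIT += "recipe_sanity"' >> conf/local.conf
    bitbake -c recipe_sanity busybox
    bitbake -c recipe_sanity_all core-image-minimal    # recursive via recrdeptask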
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
new file mode 100644
index 0000000000..4ca9981f44
--- /dev/null
+++ b/meta/classes/relocatable.bbclass
@@ -0,0 +1,7 @@
1inherit chrpath
2
3SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
4
5python relocatable_binaries_preprocess() {
6 rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
7}
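The heavy lifting happens in rpath_replace from the inherited chrpath.bbclass; conceptually it does per staged binary what the chrpath tool does by hand. An illustrative session with an invented path:

    chrpath -l sysroot-destdir/usr/bin/tool                     # list the current RPATH
    chrpath -r '$ORIGIN/../lib' sysroot-destdir/usr/bin/tool    # rewrite it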
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
new file mode 100644
index 0000000000..f0f6d18249
--- /dev/null
+++ b/meta/classes/rm_work.bbclass
@@ -0,0 +1,99 @@
1#
2# Removes the work directory contents (everything except temp) after the build
3#
4# To use it, add this line to conf/local.conf:
5#
6# INHERIT += "rm_work"
7#
8# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE.
9# For example, in conf/local.conf:
10#
11# RM_WORK_EXCLUDE += "icu-native icu busybox"
12#
13
14# Use the completion scheduler by default when rm_work is active
15# to try to reduce disk usage
16BB_SCHEDULER ?= "completion"
17
18RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
19BB_DEFAULT_TASK = "rm_work_all"
20
21do_rm_work () {
22	# If the recipe name is in RM_WORK_EXCLUDE, skip the recipe.
23 for p in ${RM_WORK_EXCLUDE}; do
24 if [ "$p" = "${PN}" ]; then
25 bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE"
26 exit 0
27 fi
28 done
29
30 cd ${WORKDIR}
31 for dir in *
32 do
33		# Retain only logs and other files in temp, safely ignoring
34		# failures to remove pseudo folders on NFS2/3 servers.
35 if [ $dir = 'pseudo' ]; then
36 rm -rf $dir 2> /dev/null || true
37 elif [ $dir != 'temp' ]; then
38 rm -rf $dir
39 fi
40 done
41
42	# Need to add pseudo back or subsequent work in this workdir
43 # might fail since setscene may not rerun to recreate it
44 mkdir -p ${WORKDIR}/pseudo/
45
46 # Change normal stamps into setscene stamps as they better reflect the
47	# fact that WORKDIR is now empty
48 # Also leave noexec stamps since setscene stamps don't cover them
49 cd `dirname ${STAMP}`
50 for i in `basename ${STAMP}`*
51 do
52 for j in ${SSTATETASKS}
53 do
54 case $i in
55 *do_setscene*)
56 break
57 ;;
58 *sigdata*)
59 i=dummy
60 break
61 ;;
62 *do_package_write*)
63 i=dummy
64 break
65 ;;
66 *do_build*)
67 i=dummy
68 break
69 ;;
70			# We remove do_package entirely, including any
71			# sstate version, since otherwise we'd need to leave 'plaindirs'
72			# such as 'packages' and 'packages-split' around, and these can be
73			# large. No end-of-chain tasks depend directly on do_package anymore.
74 *do_package|*do_package.*|*do_package_setscene.*)
75 rm -f $i;
76 i=dummy
77 break
78 ;;
79 *_setscene*)
80 i=dummy
81 break
82 ;;
83 *$j|*$j.*)
84 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
85 i=dummy
86 break
87 ;;
88 esac
89 done
90 rm -f $i
91 done
92}
93addtask rm_work after do_${RMWORK_ORIG_TASK}
94
95do_rm_work_all () {
96 :
97}
98do_rm_work_all[recrdeptask] = "do_rm_work"
99addtask rm_work_all after do_rm_work
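The stamp conversion above can be reproduced in isolation: the sed turns a normal task stamp name into its _setscene equivalent before the mv (the stamp name below is invented):

    i=myrecipe-1.0-r0.do_populate_sysroot.abc123
    j=do_populate_sysroot
    new=`echo $i | sed -e "s#${j}#${j}_setscene#"`
    echo $new
    # prints: myrecipe-1.0-r0.do_populate_sysroot_setscene.abc123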
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
new file mode 100644
index 0000000000..b1c52f9dd6
--- /dev/null
+++ b/meta/classes/rootfs_deb.bbclass
@@ -0,0 +1,137 @@
1#
2# Copyright 2006-2007 Openedhand Ltd.
3#
4
5ROOTFS_PKGMANAGE = "dpkg apt"
6ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
7
8do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
9do_rootfs[recrdeptask] += "do_package_write_deb"
10rootfs_deb_do_rootfs[vardepsexclude] += "BUILDNAME"
11
12do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
13
14python rootfs_deb_bad_recommendations() {
15 if d.getVar("BAD_RECOMMENDATIONS", True):
16 bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
17}
18do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
19
20DEB_POSTPROCESS_COMMANDS = ""
21
22opkglibdir = "${localstatedir}/lib/opkg"
23
24deb_package_setflag() {
25 sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/lib/dpkg/status
26}
27
28deb_package_getflag() {
29 cat ${IMAGE_ROOTFS}/var/lib/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
30}
31
32fakeroot rootfs_deb_do_rootfs () {
33 set +e
34
35 mkdir -p ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
36
37 # update index
38 package_update_index_deb
39
40 #install packages
41 export INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}"
42 export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
43 export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
44 export INSTALL_PACKAGES_NORMAL_DEB="${PACKAGE_INSTALL}"
45 export INSTALL_PACKAGES_ATTEMPTONLY_DEB="${PACKAGE_INSTALL_ATTEMPTONLY}"
46 export INSTALL_PACKAGES_LINGUAS_DEB="${LINGUAS_INSTALL}"
47 export INSTALL_TASK_DEB="rootfs"
48
49 package_install_internal_deb
50 ${DEB_POSTPROCESS_COMMANDS}
51
52 rootfs_install_complementary
53
54 export D=${IMAGE_ROOTFS}
55 export OFFLINE_ROOT=${IMAGE_ROOTFS}
56 export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
57 export OPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
58 export INTERCEPT_DIR=${WORKDIR}/intercept_scripts
59 export NATIVE_ROOT=${STAGING_DIR_NATIVE}
60
61 # Attempt to run preinsts
62 # Mark packages with preinst failures as unpacked
63 for i in ${IMAGE_ROOTFS}/var/lib/dpkg/info/*.preinst; do
64 if [ -f $i ] && ! sh $i; then
65 deb_package_setflag unpacked `basename $i .preinst`
66 fi
67 done
68
69 # Attempt to run postinsts
70 # Mark packages with postinst failures as unpacked
71 for i in ${IMAGE_ROOTFS}/var/lib/dpkg/info/*.postinst; do
72 if [ -f $i ] && ! sh $i configure; then
73 deb_package_setflag unpacked `basename $i .postinst`
74 fi
75 done
76
77 set -e
78
79 install -d ${IMAGE_ROOTFS}/${sysconfdir}
80 echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
81
82 # Hacks to allow opkg's update-alternatives and opkg to coexist for now
83 mkdir -p ${IMAGE_ROOTFS}${opkglibdir}
84 if [ -e ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives ]; then
85 rmdir ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
86 fi
87 ln -s ${opkglibdir}/alternatives ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
88 ln -s /var/lib/dpkg/info ${IMAGE_ROOTFS}${opkglibdir}/info
89 ln -s /var/lib/dpkg/status ${IMAGE_ROOTFS}${opkglibdir}/status
90
91 ${ROOTFS_POSTPROCESS_COMMAND}
92
93 if ${@base_contains("IMAGE_FEATURES", "read-only-rootfs", "true", "false" ,d)}; then
94 if [ -n "$(delayed_postinsts)" ]; then
95 bberror "Some packages could not be configured offline and rootfs is read-only."
96 exit 1
97 fi
98 fi
99
100 log_check rootfs
101}
102
103rootfs_deb_do_rootfs[vardeps] += "delayed_postinsts"
104
105delayed_postinsts () {
106 cat ${IMAGE_ROOTFS}/var/lib/dpkg/status|grep -e "^Package:" -e "^Status:"|sed -ne 'N;s/Package: \(.*\)\nStatus:.*unpacked/\1/p'
107}
108
109save_postinsts () {
110	# Scripts need to be ordered when executed; this ensures numeric order.
111	# If we ever run into needing more than 899 scripts, we'll have to
112	# change num to start at 1000.
113 num=100
114 for p in $(delayed_postinsts); do
115 install -d ${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts
116 cp ${IMAGE_ROOTFS}/var/lib/dpkg/info/$p.postinst ${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts/$num-$p
117 num=`echo \$((num+1))`
118 done
119}
120
121remove_packaging_data_files() {
122 rm -rf ${IMAGE_ROOTFS}${opkglibdir}
123 rm -rf ${IMAGE_ROOTFS}/var/lib/dpkg/
124}
125
126rootfs_install_packages() {
127 ${STAGING_BINDIR_NATIVE}/apt-get ${APT_ARGS} install `cat $1` --force-yes --allow-unauthenticated
128
129 # Mark all packages installed
130 sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" $INSTALL_ROOTFS_DEB/var/lib/dpkg/status
131}
132
133rootfs_uninstall_packages() {
134	# For some reason --root doesn't really work here... we use --admindir and --instdir instead.
135 ${STAGING_BINDIR_NATIVE}/dpkg --admindir=${IMAGE_ROOTFS}/var/lib/dpkg --instdir=${IMAGE_ROOTFS} -r --force-depends $@
136}
137
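deb_package_setflag works by editing dpkg's status database in place: it finds the stanza for the named package, steps to the following Status: line and rewrites the final flag. A self-contained sketch on a sample stanza (not taken from a real image):

    printf 'Package: foo\nStatus: install ok installed\n' > status.sample
    sed -i -e '/^Package: foo$/{n; s/Status: install ok .*/Status: install ok unpacked/;}' status.sample
    cat status.sample
    # Package: foo
    # Status: install ok unpacked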
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
new file mode 100644
index 0000000000..b0805dc329
--- /dev/null
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -0,0 +1,188 @@
1#
2# Creates a root filesystem out of IPKs
3#
4# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs image, etc.
5# See image.bbclass for a usage of this.
6#
7
8EXTRAOPKGCONFIG ?= ""
9ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}"
10ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
11
12do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
13do_rootfs[recrdeptask] += "do_package_write_ipk"
14rootfs_ipk_do_rootfs[vardepsexclude] += "BUILDNAME"
15
16do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
17
18OPKG_PREPROCESS_COMMANDS = "package_update_index_ipk; package_generate_ipkg_conf"
19
20OPKG_POSTPROCESS_COMMANDS = "ipk_insert_feed_uris; "
21
22OPKGLIBDIR = "${localstatedir}/lib"
23
24MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg"
25
26fakeroot rootfs_ipk_do_rootfs () {
27 #set -x
28
29 rm -f ${IPKGCONF_TARGET}
30 touch ${IPKGCONF_TARGET}
31
32 ${OPKG_PREPROCESS_COMMANDS}
33
34 mkdir -p ${T}/
35
36 export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
37 export INSTALL_ROOTFS_IPK="${IMAGE_ROOTFS}"
38 STATUS=${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/status
39 mkdir -p ${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg
40
41 opkg-cl ${OPKG_ARGS} update
42
43	# Prime the status file with entries for the packages we don't want installed (BAD_RECOMMENDATIONS)
44 for i in ${BAD_RECOMMENDATIONS}; do
45 pkginfo="`opkg-cl ${OPKG_ARGS} info $i`"
46 if [ ! -z "$pkginfo" ]; then
47 # Take just the first package stanza as otherwise only
48 # the last one will have the right Status line.
49 echo "$pkginfo" | awk "/^Package:/ { print } \
50 /^Architecture:/ { print } \
51 /^Version:/ { print } \
52 /^$/ { exit } \
53 END { print \"Status: deinstall hold not-installed\n\" }" - >> $STATUS
54 else
55 echo "Requested ignored recommendation $i is not a package"
56 fi
57 done
58
59 #install
60 export INSTALL_PACKAGES_ATTEMPTONLY_IPK="${PACKAGE_INSTALL_ATTEMPTONLY}"
61 export INSTALL_PACKAGES_LINGUAS_IPK="${LINGUAS_INSTALL}"
62 export INSTALL_TASK_IPK="rootfs"
63
64
65 export INSTALL_PACKAGES_IPK="${PACKAGE_INSTALL}"
66
67 #post install
68 export D=${IMAGE_ROOTFS}
69 export OFFLINE_ROOT=${IMAGE_ROOTFS}
70 export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
71 export OPKG_OFFLINE_ROOT=${IPKG_OFFLINE_ROOT}
72 export INTERCEPT_DIR=${WORKDIR}/intercept_scripts
73 export NATIVE_ROOT=${STAGING_DIR_NATIVE}
74
75 package_install_internal_ipk
76
77 # Distro specific packages should create this
78 #mkdir -p ${IMAGE_ROOTFS}/etc/opkg/
79 #grep "^arch" ${IPKGCONF_TARGET} >${IMAGE_ROOTFS}/etc/opkg/arch.conf
80
81 rootfs_install_complementary
82
83 ${OPKG_POSTPROCESS_COMMANDS}
84 ${ROOTFS_POSTINSTALL_COMMAND}
85
86 install -d ${IMAGE_ROOTFS}/${sysconfdir}
87 echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
88
89 ${ROOTFS_POSTPROCESS_COMMAND}
90
91 if ${@base_contains("IMAGE_FEATURES", "read-only-rootfs", "true", "false" ,d)}; then
92 if [ -n "$(delayed_postinsts)" ]; then
93 bberror "Some packages could not be configured offline and rootfs is read-only."
94 exit 1
95 fi
96 fi
97
98 rm -f ${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/lists/*
99 log_check rootfs
100}
101
102rootfs_ipk_do_rootfs[vardeps] += "delayed_postinsts"
103
104delayed_postinsts () {
105 cat ${STATUS}|grep -e "^Package:" -e "^Status:"|sed -ne 'N;s/Package: \(.*\)\nStatus:.*unpacked/\1/p'
106}
107
108save_postinsts () {
109	# Scripts need to be ordered when executed; this ensures numeric order.
110	# If we ever run into needing more than 899 scripts, we'll have to
111	# change num to start at 1000.
112 num=100
113 for p in $(delayed_postinsts); do
114 install -d ${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts
115 cp ${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info/$p.postinst ${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts/$num-$p
116 num=`echo \$((num+1))`
117 done
118}
119
120rootfs_ipk_write_manifest() {
121 manifest=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest
122 cp ${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/status $manifest
123
124 sed '/Depends/d' -i $manifest
125 sed '/Status/d' -i $manifest
126 sed '/Architecture/d' -i $manifest
127 sed '/Installed-Time/d' -i $manifest
128 sed '/Auto-Installed/d' -i $manifest
129 sed '/Recommends/d' -i $manifest
130 sed '/Provides/d' -i $manifest
131 sed '/Conflicts/d' -i $manifest
132}
133
134remove_packaging_data_files() {
135 rm -rf ${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg
136 # We need the directory for the package manager lock
137 mkdir ${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg
138}
139
140rootfs_install_packages() {
141 opkg-cl ${OPKG_ARGS} install `cat $1`
142}
143
144rootfs_uninstall_packages() {
145 opkg-cl ${OPKG_ARGS} --force-depends remove $@
146}
147
148ipk_insert_feed_uris () {
149
150 echo "Building from feeds activated!"
151
152 for line in ${IPK_FEED_URIS}
153 do
154 # strip leading and trailing spaces/tabs, then split into name and uri
155 line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
156 feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
157 feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
158
159 echo "Added $feed_name feed with URL $feed_uri"
160
161 # insert new feed-sources
162 echo "src/gz $feed_name $feed_uri" >> ${IPKGCONF_TARGET}
163 done
164
165	# Allow use of the package deploy directory contents as a quick devel-testing
166 # feed. This creates individual feed configs for each arch subdir of those
167 # specified as compatible for the current machine.
168 # NOTE: Development-helper feature, NOT a full-fledged feed.
169 if [ -n "${FEED_DEPLOYDIR_BASE_URI}" ]; then
170 for arch in ${PACKAGE_ARCHS}
171 do
172 echo "src/gz local-$arch ${FEED_DEPLOYDIR_BASE_URI}/$arch" >> ${IMAGE_ROOTFS}/etc/opkg/local-$arch-feed.conf
173 done
174 fi
175}
176
177python () {
178
179 if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
180 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
181 flags = flags.replace("do_package_write_ipk", "")
182 flags = flags.replace("do_deploy", "")
183 flags = flags.replace("do_populate_sysroot", "")
184 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
185 d.setVar('OPKG_PREPROCESS_COMMANDS', "package_generate_archlist\nipk_insert_feed_uris")
186 d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
187}
188
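Each IPK_FEED_URIS entry uses '##' to separate the feed name from its URL, which the two sed expressions above split apart. For example (feed name and URL are invented):

    line_clean='updates##http://example.com/ipk'
    echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'    # -> updates
    echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'    # -> http://example.com/ipk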
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
new file mode 100644
index 0000000000..6c68ac89fb
--- /dev/null
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -0,0 +1,224 @@
1#
2# Creates a root filesystem out of rpm packages
3#
4
5ROOTFS_PKGMANAGE = "rpm smartpm"
6ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
7
8# Add 50MB of extra space for Smart
9IMAGE_ROOTFS_EXTRA_SPACE_append = "${@base_contains("PACKAGE_INSTALL", "smartpm", " + 51200", "" ,d)}"
10
11# Smart is python based, so be sure python-native is available to us.
12EXTRANATIVEPATH += "python-native"
13
14do_rootfs[depends] += "rpm-native:do_populate_sysroot"
15do_rootfs[depends] += "rpmresolve-native:do_populate_sysroot"
16do_rootfs[depends] += "python-smartpm-native:do_populate_sysroot"
17
18# Needed for update-alternatives
19do_rootfs[depends] += "opkg-native:do_populate_sysroot"
20
21# Creating the repo info in do_rootfs
22do_rootfs[depends] += "createrepo-native:do_populate_sysroot"
23
24do_rootfs[recrdeptask] += "do_package_write_rpm"
25rootfs_rpm_do_rootfs[vardepsexclude] += "BUILDNAME"
26
27RPM_PREPROCESS_COMMANDS = "package_update_index_rpm; "
28RPM_POSTPROCESS_COMMANDS = "rpm_setup_smart_target_config; "
29
30rpmlibdir = "/var/lib/rpm"
31opkglibdir = "${localstatedir}/lib/opkg"
32
33RPMOPTS="--dbpath ${rpmlibdir}"
34RPM="rpm ${RPMOPTS}"
35
36# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
37# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
38do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
39
40fakeroot rootfs_rpm_do_rootfs () {
41 ${RPM_PREPROCESS_COMMANDS}
42
43 # install packages
44 # This needs to work in the same way as populate_sdk_rpm.bbclass!
45 export INSTALL_ROOTFS_RPM="${IMAGE_ROOTFS}"
46 export INSTALL_PLATFORM_RPM="$(echo ${TARGET_ARCH} | tr - _)${TARGET_VENDOR}-${TARGET_OS}"
47 export INSTALL_PACKAGES_RPM="${PACKAGE_INSTALL}"
48 export INSTALL_PACKAGES_ATTEMPTONLY_RPM="${PACKAGE_INSTALL_ATTEMPTONLY}"
49 export INSTALL_PACKAGES_LINGUAS_RPM="${LINGUAS_INSTALL}"
50 export INSTALL_PROVIDENAME_RPM=""
51 export INSTALL_TASK_RPM="rootfs_rpm_do_rootfs"
52 export INSTALL_COMPLEMENTARY_RPM=""
53
54	# Set up the base system configuration
55 mkdir -p ${INSTALL_ROOTFS_RPM}/etc/rpm/
56
57	# List must be ordered from most preferred to least preferred
58 default_extra_rpm=""
59 INSTALL_PLATFORM_EXTRA_RPM=""
60 for os in ${MULTILIB_OS_LIST} ; do
61 old_IFS="$IFS"
62 IFS=":"
63 set -- $os
64 IFS="$old_IFS"
65 mlib=$1
66 mlib_os=$2
67 for prefix in ${MULTILIB_PREFIX_LIST} ; do
68 old_IFS="$IFS"
69 IFS=":"
70 set -- $prefix
71 IFS="$old_IFS"
72 if [ "$mlib" != "$1" ]; then
73 continue
74 fi
75 shift #remove mlib
76 while [ -n "$1" ]; do
77 platform="$(echo $1 | tr - _)-.*-$mlib_os"
78 if [ "$mlib" = "${BBEXTENDVARIANT}" ]; then
79 default_extra_rpm="$default_extra_rpm $platform"
80 else
81 INSTALL_PLATFORM_EXTRA_RPM="$INSTALL_PLATFORM_EXTRA_RPM $platform"
82 fi
83 shift
84 done
85 done
86 done
87 if [ -n "$default_extra_rpm" ]; then
88 INSTALL_PLATFORM_EXTRA_RPM="$default_extra_rpm $INSTALL_PLATFORM_EXTRA_RPM"
89 fi
90 export INSTALL_PLATFORM_EXTRA_RPM
91
92 package_install_internal_rpm
93
94 rootfs_install_complementary
95
96 export D=${IMAGE_ROOTFS}
97 export OFFLINE_ROOT=${IMAGE_ROOTFS}
98 export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
99 export OPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
100
101 ${ROOTFS_POSTINSTALL_COMMAND}
102
103 # Report delayed package scriptlets
104 for i in ${IMAGE_ROOTFS}/etc/rpm-postinsts/*; do
105 if [ -f $i ]; then
106 echo "Delayed package scriptlet: `head -n 3 $i | tail -n 1`"
107 fi
108 done
109
110 install -d ${IMAGE_ROOTFS}/${sysconfdir}
111 echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
112
113 ${RPM_POSTPROCESS_COMMANDS}
114 ${ROOTFS_POSTPROCESS_COMMAND}
115
116 if ${@base_contains("IMAGE_FEATURES", "read-only-rootfs", "true", "false" ,d)}; then
117 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts ] ; then
118 if [ "`ls -A ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts`" != "" ] ; then
119 bberror "Some packages could not be configured offline and rootfs is read-only."
120 exit 1
121 fi
122 fi
123 fi
124
125 rm -rf ${IMAGE_ROOTFS}/var/cache2/
126 rm -rf ${IMAGE_ROOTFS}/var/run2/
127 rm -rf ${IMAGE_ROOTFS}/var/log2/
128
129 # remove lock files
130 rm -f ${IMAGE_ROOTFS}${rpmlibdir}/__db.*
131
132 # Remove all remaining resolver files
133 rm -rf ${IMAGE_ROOTFS}/install
134
135 log_check rootfs
136}
137
138rootfs_rpm_do_rootfs[vardeps] += "delayed_postinsts"
139
140delayed_postinsts() {
141 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts ]; then
142 ls ${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts
143 fi
144}
145
146save_postinsts() {
147 # this is just a stub. For RPM, the failed postinstalls are already saved in
148 # /etc/rpm-postinsts
149 true
150}
151
152remove_packaging_data_files() {
153	# Save the rpmlib for incremental rpm image generation
154 t="${T}/saved_rpmlib/var/lib"
155 rm -fr $t
156 mkdir -p $t
157 mv ${IMAGE_ROOTFS}${rpmlibdir} $t
158 rm -rf ${IMAGE_ROOTFS}${opkglibdir}
159 rm -rf ${IMAGE_ROOTFS}/var/lib/smart
160}
161
162rpm_setup_smart_target_config() {
163 # Set up smart configuration for the target
164 rm -rf ${IMAGE_ROOTFS}/var/lib/smart
165 smart --data-dir=${IMAGE_ROOTFS}/var/lib/smart channel --add rpmsys type=rpm-sys -y
166 package_write_smart_config ${IMAGE_ROOTFS}
167 rm -f ${IMAGE_ROOTFS}/var/lib/smart/config.old
168}
169
170rootfs_install_packages() {
171 # Note - we expect the variables not set here to already have been set
172 export INSTALL_PACKAGES_RPM=""
173 export INSTALL_PACKAGES_ATTEMPTONLY_RPM="`cat $1`"
174 export INSTALL_PROVIDENAME_RPM=""
175 export INSTALL_TASK_RPM="rootfs_install_packages"
176 export INSTALL_COMPLEMENTARY_RPM="1"
177
178 package_install_internal_rpm
179}
180
181rootfs_uninstall_packages() {
182 rpm -e --nodeps --root=${IMAGE_ROOTFS} --dbpath=/var/lib/rpm\
183 --define='_cross_scriptlet_wrapper ${WORKDIR}/scriptlet_wrapper'\
184 --define='_tmppath /install/tmp' $@
185
186 # remove temp directory
187 rm -rf ${IMAGE_ROOTFS}/install
188}
189
190python () {
191 if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
192 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
193 flags = flags.replace("do_package_write_rpm", "")
194 flags = flags.replace("do_deploy", "")
195 flags = flags.replace("do_populate_sysroot", "")
196 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
197 d.setVar('RPM_PREPROCESS_COMMANDS', '')
198 d.setVar('RPM_POSTPROCESS_COMMANDS', '')
199
200	# The following code should be kept in sync with the populate_sdk_rpm version.
201
202 # package_arch order is reversed. This ensures the -best- match is listed first!
203 package_archs = d.getVar("PACKAGE_ARCHS", True) or ""
204 package_archs = ":".join(package_archs.split()[::-1])
205 package_os = d.getVar("TARGET_OS", True) or ""
206 ml_prefix_list = "%s:%s" % ('default', package_archs)
207 ml_os_list = "%s:%s" % ('default', package_os)
208 multilibs = d.getVar('MULTILIBS', True) or ""
209 for ext in multilibs.split():
210 eext = ext.split(':')
211 if len(eext) > 1 and eext[0] == 'multilib':
212 localdata = bb.data.createCopy(d)
213 default_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + eext[1], False)
214 if default_tune:
215 localdata.setVar("DEFAULTTUNE", default_tune)
216 bb.data.update_data(localdata)
217 package_archs = localdata.getVar("PACKAGE_ARCHS", True) or ""
218 package_archs = ":".join([i in "all noarch any".split() and i or eext[1]+"_"+i for i in package_archs.split()][::-1])
219 package_os = localdata.getVar("TARGET_OS", True) or ""
220 ml_prefix_list += " %s:%s" % (eext[1], package_archs)
221 ml_os_list += " %s:%s" % (eext[1], package_os)
222 d.setVar('MULTILIB_PREFIX_LIST', ml_prefix_list)
223 d.setVar('MULTILIB_OS_LIST', ml_os_list)
224}
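The old_IFS/IFS=":" dance used in rootfs_rpm_do_rootfs is a portable shell way to split "name:value" entries into positional parameters. In isolation (the entry value is illustrative):

    os_entry='lib32:linux-gnueabi'
    old_IFS="$IFS"; IFS=":"
    set -- $os_entry
    IFS="$old_IFS"
    echo "mlib=$1 mlib_os=$2"    # -> mlib=lib32 mlib_os=linux-gnueabi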
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
new file mode 100644
index 0000000000..b8e5b02da0
--- /dev/null
+++ b/meta/classes/sanity.bbclass
@@ -0,0 +1,756 @@
1#
2# Sanity check the user's setup for common misconfigurations
3#
4
5SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar gzip gawk chrpath wget cpio"
6
7def bblayers_conf_file(d):
8 return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
9
10def sanity_conf_read(fn):
11 with open(fn, 'r') as f:
12 lines = f.readlines()
13 return lines
14
15def sanity_conf_find_line(pattern, lines):
16 import re
17 return next(((index, line)
18 for index, line in enumerate(lines)
19 if re.search(pattern, line)), (None, None))
20
21def sanity_conf_update(fn, lines, version_var_name, new_version):
22 index, line = sanity_conf_find_line(version_var_name, lines)
23 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
24 with open(fn, "w") as f:
25 f.write(''.join(lines))
26
27EXPORT_FUNCTIONS bblayers_conf_file sanity_conf_read sanity_conf_find_line sanity_conf_update
28
29# Functions added to this variable MUST throw an exception (or sys.exit()) unless they
30# successfully changed LCONF_VERSION in bblayers.conf
31BBLAYERS_CONF_UPDATE_FUNCS += "oecore_update_bblayers"
32
33python oecore_update_bblayers() {
34 # bblayers.conf is out of date, so see if we can resolve that
35
36 current_lconf = int(d.getVar('LCONF_VERSION', True))
37 if not current_lconf:
38 sys.exit()
39 lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
40 lines = []
41
42 if current_lconf < 4:
43 sys.exit()
44
45 bblayers_fn = bblayers_conf_file(d)
46 lines = sanity_conf_read(bblayers_fn)
47
48 if current_lconf == 4 and lconf_version > 4:
49 topdir_var = '$' + '{TOPDIR}'
50 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
51 if bbpath_line:
52 start = bbpath_line.find('"')
53 if start != -1 and (len(bbpath_line) != (start + 1)):
54 if bbpath_line[start + 1] == '"':
55 lines[index] = (bbpath_line[:start + 1] +
56 topdir_var + bbpath_line[start + 1:])
57 else:
58 if not topdir_var in bbpath_line:
59 lines[index] = (bbpath_line[:start + 1] +
60 topdir_var + ':' + bbpath_line[start + 1:])
61 else:
62 sys.exit()
63 else:
64 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
65 if bbfiles_line:
66 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
67 else:
68 sys.exit()
69
70 current_lconf += 1
71 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
72 return
73
74 sys.exit()
75}
76
77def raise_sanity_error(msg, d, network_error=False):
78 if d.getVar("SANITY_USE_EVENTS", True) == "1":
79 try:
80 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
81 except TypeError:
82 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
83 return
84
85 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
86 Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
87 Following is the list of potential problems / advisories:
88
89 %s""" % msg)
90
91# Check a single tune for validity.
92def check_toolchain_tune(data, tune, multilib):
93 tune_errors = []
94 if not tune:
95 return "No tuning found for %s multilib." % multilib
96 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
97 features = (data.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
98 if not features:
99 return "Tuning '%s' has no defined features, and cannot be used." % tune
100 valid_tunes = data.getVarFlags('TUNEVALID') or {}
101 conflicts = data.getVarFlags('TUNECONFLICTS') or {}
102 # [doc] is the documentation for the variable, not a real feature
103 if 'doc' in valid_tunes:
104 del valid_tunes['doc']
105 if 'doc' in conflicts:
106 del conflicts['doc']
107 for feature in features:
108 if feature in conflicts:
109 for conflict in conflicts[feature].split():
110 if conflict in features:
111 tune_errors.append("Feature '%s' conflicts with '%s'." %
112 (feature, conflict))
113 if feature in valid_tunes:
114 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
115 else:
116 tune_errors.append("Feature '%s' is not defined." % feature)
117 whitelist = data.getVar("TUNEABI_WHITELIST", True) or ''
118 override = data.getVar("TUNEABI_OVERRIDE", True) or ''
119 if whitelist:
120 tuneabi = data.getVar("TUNEABI_tune-%s" % tune, True) or ''
121 if not tuneabi:
122 tuneabi = tune
123 if True not in [x in whitelist.split() for x in tuneabi.split()]:
124 tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
125 (tune, tuneabi))
126 if tune_errors:
127 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
128
129def check_toolchain(data):
130 tune_error_set = []
131 deftune = data.getVar("DEFAULTTUNE", True)
132 tune_errors = check_toolchain_tune(data, deftune, 'default')
133 if tune_errors:
134 tune_error_set.append(tune_errors)
135
136 multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
137 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split()
138
139 if multilibs:
140 seen_libs = []
141 seen_tunes = []
142 for lib in multilibs:
143 if lib in seen_libs:
144 tune_error_set.append("The multilib '%s' appears more than once." % lib)
145 else:
146 seen_libs.append(lib)
147 if not lib in global_multilibs:
148 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
149 tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
150 if tune in seen_tunes:
151 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
152 else:
153                seen_tunes.append(tune)
154 if tune == deftune:
155 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
156 else:
157 tune_errors = check_toolchain_tune(data, tune, lib)
158 if tune_errors:
159 tune_error_set.append(tune_errors)
160 if tune_error_set:
161 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
162
163 return ""
164
165def check_conf_exists(fn, data):
166 bbpath = []
167 fn = data.expand(fn)
168 vbbpath = data.getVar("BBPATH")
169 if vbbpath:
170 bbpath += vbbpath.split(":")
171 for p in bbpath:
172 currname = os.path.join(data.expand(p), fn)
173 if os.access(currname, os.R_OK):
174 return True
175 return False
176
177def check_create_long_filename(filepath, pathname):
178 testfile = os.path.join(filepath, ''.join([`num`[-1] for num in xrange(1,200)]))
179 try:
180 if not os.path.exists(filepath):
181 bb.utils.mkdirhier(filepath)
182 f = open(testfile, "w")
183 f.close()
184 os.remove(testfile)
185 except IOError as e:
186 errno, strerror = e.args
187 if errno == 36: # ENAMETOOLONG
188 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
189 else:
190 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
191 except OSError as e:
192 errno, strerror = e.args
193 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
194 return ""
195
196def check_path_length(filepath, pathname, limit):
197 if len(filepath) > limit:
198 return "The length of %s is longer than 410, this would cause unexpected errors, please use a shorter path.\n" % pathname
199 return ""
200
201def check_connectivity(d):
202    # URIs to check can be set in the CONNECTIVITY_CHECK_URIS variable
203 # using the same syntax as for SRC_URI. If the variable is not set
204 # the check is skipped
205 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
206 retval = ""
207
208 # Only check connectivity if network enabled and the
209 # CONNECTIVITY_CHECK_URIS are set
210 network_enabled = not d.getVar('BB_NO_NETWORK', True)
211 check_enabled = len(test_uris)
212    # Take a copy of the data store and unset MIRRORS and PREMIRRORS
213 data = bb.data.createCopy(d)
214 data.delVar('PREMIRRORS')
215 data.delVar('MIRRORS')
216 if check_enabled and network_enabled:
217 try:
218 fetcher = bb.fetch2.Fetch(test_uris, data)
219 fetcher.checkstatus()
220 except Exception:
221 # Allow the message to be configured so that users can be
222 # pointed to a support mechanism.
223 msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
224 if len(msg) == 0:
225 msg = "Failed to fetch test data from the network. Please ensure your network is configured correctly.\n"
226 retval = msg
227
228 return retval
229
230def check_supported_distro(sanity_data):
231 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True)
232 if not tested_distros:
233 return
234
235 try:
236 distro = oe.lsb.distro_identifier()
237 except Exception:
238 distro = None
239
240 if distro:
241 if distro not in [x.strip() for x in tested_distros.split('\\n')]:
242 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
243 else:
244 bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
245
246# Checks we should only make if MACHINE is set correctly
247def check_sanity_validmachine(sanity_data):
248 messages = ""
249
250 # Check TUNE_ARCH is set
251 if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID':
252 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
253
254 # Check TARGET_OS is set
255 if sanity_data.getVar('TARGET_OS', True) == 'INVALID':
256 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
257
258 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
259 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
260 tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
261 tunefound = False
262 seen = {}
263 dups = []
264
265 for pa in pkgarchs.split():
266 if seen.get(pa, 0) == 1:
267 dups.append(pa)
268 else:
269 seen[pa] = 1
270 if pa == tunepkg:
271 tunefound = True
272
273 if len(dups):
274 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
275
276    if not tunefound:
277 messages = messages + "Error, the PACKAGE_ARCHS variable does not contain TUNE_PKGARCH (%s)." % tunepkg
278
279 return messages
280
281# Check whether the -march option needs to be added to the host gcc
282def check_gcc_march(sanity_data):
283 result = True
284 message = ""
285
286 # Check if -march not in BUILD_CFLAGS
287 if sanity_data.getVar("BUILD_CFLAGS",True).find("-march") < 0:
288 result = False
289
290 # Construct a test file
291 f = open("gcc_test.c", "w")
292 f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n")
293 f.close()
294
295 # Check if GCC could work without march
296 if not result:
297 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc gcc_test.c -o gcc_test")
298 if status == 0:
299            result = True
300
301 if not result:
302 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=native gcc_test.c -o gcc_test")
303 if status == 0:
304 message = "BUILD_CFLAGS_append = \" -march=native\""
305            result = True
306
307 if not result:
308 build_arch = sanity_data.getVar('BUILD_ARCH', True)
309 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=%s gcc_test.c -o gcc_test" % build_arch)
310 if status == 0:
311 message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
312            result = True
313
314 os.remove("gcc_test.c")
315 if os.path.exists("gcc_test"):
316 os.remove("gcc_test")
317
318 return (result, message)
319
320# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
321# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
322def check_make_version(sanity_data):
323 from distutils.version import LooseVersion
324 status, result = oe.utils.getstatusoutput("make --version")
325 if status != 0:
326 return "Unable to execute make --version, exit code %s\n" % status
327 version = result.split()[2]
328 if LooseVersion(version) == LooseVersion("3.82"):
329 # Construct a test file
330 f = open("makefile_test", "w")
331 f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
332 f.write("\n")
333 f.write("makefile_test_a.c:\n")
334 f.write(" touch $@\n")
335 f.write("\n")
336 f.write("makefile_test_b.c:\n")
337 f.write(" touch $@\n")
338 f.close()
339
340 # Check if make 3.82 has been patched
341 status,result = oe.utils.getstatusoutput("make -f makefile_test")
342
343 os.remove("makefile_test")
344 if os.path.exists("makefile_test_a.c"):
345 os.remove("makefile_test_a.c")
346 if os.path.exists("makefile_test_b.c"):
347 os.remove("makefile_test_b.c")
348 if os.path.exists("makefile_test.a"):
349 os.remove("makefile_test.a")
350
351 if status != 0:
352 return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
353 return None
354
355
356# Tar version 1.24 and onwards handle overwriting symlinks correctly
357# but earlier versions do not; this needs to work properly for sstate
358def check_tar_version(sanity_data):
359 from distutils.version import LooseVersion
360 status, result = oe.utils.getstatusoutput("tar --version")
361 if status != 0:
362 return "Unable to execute tar --version, exit code %s\n" % status
363 version = result.split()[3]
364 if LooseVersion(version) < LooseVersion("1.24"):
365 return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
366 return None
367
368# We use git parameters and functionality only found in 1.7.5 or later
369def check_git_version(sanity_data):
370 from distutils.version import LooseVersion
371 status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
372 if status != 0:
373 return "Unable to execute git --version, exit code %s\n" % status
374 version = result.split()[2]
375 if LooseVersion(version) < LooseVersion("1.7.5"):
376 return "Your version of git is older than 1.7.5 and has bugs which will break builds. Please install a newer version of git.\n"
377 return None
378
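All three version checks above rely on fixed field positions in the tools' --version banners, which is why tar uses split()[3] while make and git use split()[2]. The equivalent field extraction in the shell (the version numbers shown are only examples; actual banners vary by host):

    tar --version  | head -n1 | awk '{print $4}'    # 'tar (GNU tar) 1.26'  -> 1.26
    make --version | head -n1 | awk '{print $3}'    # 'GNU Make 3.82'       -> 3.82
    git --version  | awk '{print $3}'               # 'git version 1.7.10'  -> 1.7.10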
379
380def sanity_check_conffiles(status, d):
381 # Check we are using a valid local.conf
382 current_conf = d.getVar('CONF_VERSION', True)
383 conf_version = d.getVar('LOCALCONF_VERSION', True)
384
385 if current_conf != conf_version:
386 status.addresult("Your version of local.conf was generated from an older/newer version of local.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/local.conf ${COREBASE}/meta*/conf/local.conf.sample\" is a good way to visualise the changes.\n")
387
388 # Check bblayers.conf is valid
389 current_lconf = d.getVar('LCONF_VERSION', True)
390 lconf_version = d.getVar('LAYER_CONF_VERSION', True)
391 if current_lconf != lconf_version:
392 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
393 for func in funcs:
394 success = True
395 try:
396 bb.build.exec_func(func, d)
397 except Exception:
398 success = False
399 if success:
400 bb.note("Your conf/bblayers.conf has been automatically updated.")
401 status.reparse = True
402 break
403 if not status.reparse:
404 status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare the your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version))
405
406 # If we have a site.conf, check it's valid
407 if check_conf_exists("conf/site.conf", d):
408 current_sconf = d.getVar('SCONF_VERSION', True)
409 sconf_version = d.getVar('SITE_CONF_VERSION', True)
410 if current_sconf != sconf_version:
411 status.addresult("Your version of site.conf was generated from an older version of site.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/site.conf ${COREBASE}/meta*/conf/site.conf.sample\" is a good way to visualise the changes.\n")
412
413
414def sanity_handle_abichanges(status, d):
415 #
416 # Check the 'ABI' of TMPDIR
417 #
418 current_abi = d.getVar('OELAYOUT_ABI', True)
419 abifile = d.getVar('SANITY_ABIFILE', True)
420 if os.path.exists(abifile):
421 with open(abifile, "r") as f:
422 abi = f.read().strip()
423 if not abi.isdigit():
424 with open(abifile, "w") as f:
425 f.write(current_abi)
426 elif abi == "2" and current_abi == "3":
427 bb.note("Converting staging from layout version 2 to layout version 3")
428 subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True)
429 subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True)
430 subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True)
431 with open(abifile, "w") as f:
432 f.write(current_abi)
433 elif abi == "3" and current_abi == "4":
434 bb.note("Converting staging layout from version 3 to layout version 4")
435 if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
436 subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True)
437 subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True)
438 with open(abifile, "w") as f:
439 f.write(current_abi)
440 elif abi == "4":
441 status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n")
442 elif abi == "5" and current_abi == "6":
443 bb.note("Converting staging layout from version 5 to layout version 6")
444 subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True)
445 with open(abifile, "w") as f:
446 f.write(current_abi)
447 elif abi == "7" and current_abi == "8":
448 status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
449 elif (abi != current_abi and current_abi == "9"):
450 status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will be still be valid and reused)\n")
451 elif (abi != current_abi):
452 # Code to convert from one ABI to another could go here if possible.
453 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
454 else:
455 with open(abifile, "w") as f:
456 f.write(current_abi)
457
458def check_sanity_sstate_dir_change(sstate_dir, data):
459 # Sanity checks to be done when the value of SSTATE_DIR changes
460
461 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
462 testmsg = ""
463 if sstate_dir != "":
464 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
465 # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
466 try:
467 err = testmsg.split(': ')[1].strip()
468 if err == "Permission denied.":
469 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
470 except IndexError:
471 pass
472 return testmsg
473
474def check_sanity_version_change(status, d):
475 # Sanity checks to be done when SANITY_VERSION changes
476 # In other words, these tests run once in a given build directory and then
477 # never again until the sanity version changes.
478
479    # Check the python install is complete. glib-2.0-native requires
480 # xml.parsers.expat
481 try:
482 import xml.parsers.expat
483 except ImportError:
484 status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
485
486 status.addresult(check_make_version(d))
487 status.addresult(check_tar_version(d))
488 status.addresult(check_git_version(d))
489
490 missing = ""
491
492 if not check_app_exists("${MAKE}", d):
493 missing = missing + "GNU make,"
494
495 if not check_app_exists('${BUILD_PREFIX}gcc', d):
496 missing = missing + "C Compiler (%sgcc)," % d.getVar("BUILD_PREFIX", True)
497
498 if not check_app_exists('${BUILD_PREFIX}g++', d):
499 missing = missing + "C++ Compiler (%sg++)," % d.getVar("BUILD_PREFIX", True)
500
501 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
502
503 for util in required_utilities.split():
504 if not check_app_exists(util, d):
505 missing = missing + "%s," % util
506
507 if missing:
508 missing = missing.rstrip(',')
509 status.addresult("Please install the following missing utilities: %s\n" % missing)
510
511 assume_provided = d.getVar('ASSUME_PROVIDED', True).split()
512 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
513 if "diffstat-native" not in assume_provided:
514 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
515
516 if "qemu-native" in assume_provided:
517 if not check_app_exists("qemu-arm", d):
518 status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH")
519
520 (result, message) = check_gcc_march(d)
521 if result and message:
522 status.addresult("Your gcc version is older than 4.5, please add the following param to local.conf\n \
523 %s\n" % message)
524 if not result:
525 status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build")
526 status.addresult(" and link something that uses atomic operations, such as: \n")
527 status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
528
529 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
530 tmpdir = d.getVar('TMPDIR', True)
531 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
532
533 # Some third-party software apparently relies on chmod etc. being suid root (!!)
534 import stat
535 suid_check_bins = "chown chmod mknod".split()
536 for bin_cmd in suid_check_bins:
537 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
538 if bin_path:
539 bin_stat = os.stat(bin_path)
540 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
541 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
542
543 # Check that we can fetch from various network transports
544 netcheck = check_connectivity(d)
545 status.addresult(netcheck)
546 if netcheck:
547 status.network_error = True
548
549 nolibs = d.getVar('NO32LIBS', True)
550 if not nolibs:
551 lib32path = '/lib'
552 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
553 lib32path = '/lib32'
554
555 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
556 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
557
558 bbpaths = d.getVar('BBPATH', True).split(":")
559 if ("." in bbpaths or "" in bbpaths) and not status.reparse:
560 status.addresult("BBPATH references the current directory, either through " \
561 "an empty entry, or a '.'.\n\t This is unsafe and means your "\
562 "layer configuration is adding empty elements to BBPATH.\n\t "\
563 "Please check your layer.conf files and other BBPATH " \
564 "settings to remove the current working directory " \
565 "references.\n" \
566 "Parsed BBPATH is" + str(bbpaths));
567
568 oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF', True)
569 if not oes_bb_conf:
570 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
571
572    # The length of TMPDIR can't be longer than 410 characters
573 status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
574
575def check_sanity_everybuild(status, d):
576    # Sanity tests which test the user's environment and so need to run at each build (or are so
577    # cheap that it makes sense to always run them).
578
579    if os.getuid() == 0:
580 raise_sanity_error("Do not use Bitbake as root.", d)
581
582    # Check the Python version; we now have a minimum of Python 2.7.3
583 import sys
584 if sys.hexversion < 0x020703F0:
585 status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n')
586
587 # Check the bitbake version meets minimum requirements
588 from distutils.version import LooseVersion
589 minversion = d.getVar('BB_MIN_VERSION', True)
590 if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
591 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
592
593 sanity_check_conffiles(status, d)
594
595 paths = d.getVar('PATH', True).split(":")
596 if "." in paths or "" in paths:
597 status.addresult("PATH contains '.' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
598
599 # Check that the DISTRO is valid, if set
600 # need to take into account DISTRO renaming DISTRO
601 distro = d.getVar('DISTRO', True)
602 if distro:
603 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
604 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True))
605
606 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
607 # set, since so much relies on it being set.
608 dldir = d.getVar('DL_DIR', True)
609 if not dldir:
610 status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
611    if dldir and os.path.exists(dldir) and not os.access(dldir, os.W_OK):
612 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
613
614 # Check that the MACHINE is valid, if it is set
615 machinevalid = True
616 if d.getVar('MACHINE', True):
617 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
618 status.addresult('Please set a valid MACHINE in your local.conf or environment\n')
619 machinevalid = False
620 else:
621 status.addresult(check_sanity_validmachine(d))
622 else:
623 status.addresult('Please set a MACHINE in your local.conf or environment\n')
624 machinevalid = False
625 if machinevalid:
626 status.addresult(check_toolchain(d))
627
628 check_supported_distro(d)
629
630 # Check if DISPLAY is set if TEST_IMAGE is set
631 if d.getVar('TEST_IMAGE', True) == '1' or d.getVar('DEFAULT_TEST_SUITES', True):
632 display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True)
633 if not display:
634 status.addresult('testimage needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n')
635
636 omask = os.umask(022)
637 if omask & 0755:
638 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
639 os.umask(omask)
640
641 if d.getVar('TARGET_ARCH', True) == "arm":
642 # This path is no longer user-readable in modern (very recent) Linux
643 try:
644 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
645 f = open("/proc/sys/vm/mmap_min_addr", "r")
646 try:
647 if (int(f.read().strip()) > 65536):
648 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
649 finally:
650 f.close()
651 except:
652 pass
653
654 oeroot = d.getVar('COREBASE', True)
655 if oeroot.find('+') != -1:
656 status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
657 if oeroot.find('@') != -1:
658 status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
659 if oeroot.find(' ') != -1:
660 status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
661
662 # Check that TMPDIR hasn't changed location since the last time we were run
663 tmpdir = d.getVar('TMPDIR', True)
664 checkfile = os.path.join(tmpdir, "saved_tmpdir")
665 if os.path.exists(checkfile):
666 with open(checkfile, "r") as f:
667 saved_tmpdir = f.read().strip()
668 if (saved_tmpdir != tmpdir):
669 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
670 else:
671 bb.utils.mkdirhier(tmpdir)
672 with open(checkfile, "w") as f:
673 f.write(tmpdir)
674
675def check_sanity(sanity_data):
676 import subprocess
677
678 class SanityStatus(object):
679 def __init__(self):
680 self.messages = ""
681 self.network_error = False
682 self.reparse = False
683
684 def addresult(self, message):
685 if message:
686 self.messages = self.messages + message
687
688 status = SanityStatus()
689
690 tmpdir = sanity_data.getVar('TMPDIR', True)
691 sstate_dir = sanity_data.getVar('SSTATE_DIR', True)
692
693 # Check saved sanity info
694 last_sanity_version = 0
695 last_tmpdir = ""
696 last_sstate_dir = ""
697 sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
698 if os.path.exists(sanityverfile):
699 with open(sanityverfile, 'r') as f:
700 for line in f:
701 if line.startswith('SANITY_VERSION'):
702 last_sanity_version = int(line.split()[1])
703 if line.startswith('TMPDIR'):
704 last_tmpdir = line.split()[1]
705 if line.startswith('SSTATE_DIR'):
706 last_sstate_dir = line.split()[1]
707
708 check_sanity_everybuild(status, sanity_data)
709
710 sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
711 network_error = False
712 if last_sanity_version < sanity_version:
713 check_sanity_version_change(status, sanity_data)
714 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
715 else:
716 if last_sstate_dir != sstate_dir:
717 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
718
719 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
720 with open(sanityverfile, 'w') as f:
721 f.write("SANITY_VERSION %s\n" % sanity_version)
722 f.write("TMPDIR %s\n" % tmpdir)
723 f.write("SSTATE_DIR %s\n" % sstate_dir)
724
725 sanity_handle_abichanges(status, sanity_data)
726
727 if status.messages != "":
728 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
729 return status.reparse
730
731# Create a copy of the datastore and finalise it to ensure appends and
732# overrides are set - the datastore has yet to be finalised at ConfigParsed
733def copy_data(e):
734 sanity_data = bb.data.createCopy(e.data)
735 sanity_data.finalize()
736 return sanity_data
737
738addhandler check_sanity_eventhandler
739check_sanity_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.SanityCheck bb.event.NetworkTest"
740python check_sanity_eventhandler() {
741 if bb.event.getName(e) == "ConfigParsed" and e.data.getVar("BB_WORKERCONTEXT", True) != "1" and e.data.getVar("DISABLE_SANITY_CHECKS", True) != "1":
742 sanity_data = copy_data(e)
743 reparse = check_sanity(sanity_data)
744 e.data.setVar("BB_INVALIDCONF", reparse)
745 elif bb.event.getName(e) == "SanityCheck":
746 sanity_data = copy_data(e)
747 sanity_data.setVar("SANITY_USE_EVENTS", "1")
748 reparse = check_sanity(sanity_data)
749 e.data.setVar("BB_INVALIDCONF", reparse)
750 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
751 elif bb.event.getName(e) == "NetworkTest":
752 sanity_data = copy_data(e)
753 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
754
755 return
756}
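As the guards at the top of the handler show, the checks are skipped inside bitbake workers (BB_WORKERCONTEXT) and can also be switched off explicitly. A minimal sketch of the relevant knob, with the value exactly as the handler tests it:

    # in local.conf or the environment; the handler then skips check_sanity entirely
    DISABLE_SANITY_CHECKS = "1"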
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
new file mode 100644
index 0000000000..a07a366df8
--- /dev/null
+++ b/meta/classes/scons.bbclass
@@ -0,0 +1,15 @@
1DEPENDS += "python-scons-native"
2
3EXTRA_OESCONS ?= ""
4
5scons_do_compile() {
6 ${STAGING_BINDIR_NATIVE}/scons PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
7 bbfatal "scons build execution failed."
8}
9
10scons_do_install() {
11 ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS}|| \
12 bbfatal "scons install execution failed."
13}
14
15EXPORT_FUNCTIONS do_compile do_install
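A recipe consuming this class just inherits it and optionally passes extra arguments through EXTRA_OESCONS; a hypothetical recipe fragment (the option value is illustrative, not part of the class):

    inherit scons
    EXTRA_OESCONS = "BUILDTYPE=release"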
diff --git a/meta/classes/sdl.bbclass b/meta/classes/sdl.bbclass
new file mode 100644
index 0000000000..cc31288f61
--- /dev/null
+++ b/meta/classes/sdl.bbclass
@@ -0,0 +1,6 @@
1#
2# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
3#
4
5DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
6SECTION = "x11/games"
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
new file mode 100644
index 0000000000..ba9cf13295
--- /dev/null
+++ b/meta/classes/setuptools.bbclass
@@ -0,0 +1,9 @@
1inherit distutils
2
3DEPENDS += "python-setuptools-native"
4
5DISTUTILS_INSTALL_ARGS = "--root=${D} \
6 --single-version-externally-managed \
7 --prefix=${prefix} \
8 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
9 --install-data=${datadir}"
diff --git a/meta/classes/sip.bbclass b/meta/classes/sip.bbclass
new file mode 100644
index 0000000000..711f851593
--- /dev/null
+++ b/meta/classes/sip.bbclass
@@ -0,0 +1,63 @@
1# Build Class for Sip based Python Bindings
2# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
3#
4STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip"
5
6DEPENDS =+ "sip-native"
7RDEPENDS_${PN} += "python-sip"
8
9# default stuff, do not uncomment
10# EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"
11
12# do_generate runs before do_configure, so ensure that sip-native is populated in the sysroot before executing it
13do_generate[depends] += "sip-native:do_populate_sysroot"
14
15sip_do_generate() {
16 if [ -z "${SIP_MODULES}" ]; then
17 MODULES="`ls sip/*mod.sip`"
18 else
19 MODULES="${SIP_MODULES}"
20 fi
21
22 if [ -z "$MODULES" ]; then
23 die "SIP_MODULES not set and no modules found in $PWD"
24 else
25        bbnote "using modules '$MODULES' and tags '${EXTRA_SIPTAGS}'"
26 fi
27
28 if [ -z "${EXTRA_SIPTAGS}" ]; then
29 die "EXTRA_SIPTAGS needs to be set!"
30 else
31 SIPTAGS="${EXTRA_SIPTAGS}"
32 fi
33
34 if [ ! -z "${SIP_FEATURES}" ]; then
35 FEATURES="-z ${SIP_FEATURES}"
36 bbnote "sip feature file: ${SIP_FEATURES}"
37 fi
38
39 for module in $MODULES
40 do
41 install -d ${module}/
42        echo "calling 'sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf sip/${module}/${module}mod.sip'"
43 sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf \
44 sip/${module}/${module}mod.sip || die "Error calling sip on ${module}"
45 cat ${module}/${module}.sbf | sed s,target,TARGET, \
46 | sed s,sources,SOURCES, \
47 | sed s,headers,HEADERS, \
48 | sed s,"moc_HEADERS =","HEADERS +=", \
49 >${module}/${module}.pro
50 echo "TEMPLATE=lib" >>${module}/${module}.pro
51 [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro
52 [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro
53 [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro
54 [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro
55 [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro
56 [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro
57 true
58 done
59}
60
61EXPORT_FUNCTIONS do_generate
62
63addtask generate after do_unpack do_patch before do_configure
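A recipe using this class must set EXTRA_SIPTAGS (do_generate dies without it) and may pin SIP_MODULES and SIP_FEATURES instead of relying on the sip/*mod.sip glob. A hypothetical recipe fragment, reusing the example tags from the comment above:

    inherit sip
    SIP_MODULES = "qt"
    EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"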
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
new file mode 100644
index 0000000000..3701b7cd81
--- /dev/null
+++ b/meta/classes/siteconfig.bbclass
@@ -0,0 +1,33 @@
1python siteconfig_do_siteconfig () {
2 shared_state = sstate_state_fromvars(d)
3 if shared_state['name'] != 'populate-sysroot':
4 return
5 if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
6 bb.debug(1, "No site_config directory, skipping do_siteconfig")
7 return
8 bb.build.exec_func('do_siteconfig_gencache', d)
9 sstate_clean(shared_state, d)
10 sstate_install(shared_state, d)
11}
12
13EXTRASITECONFIG ?= ""
14
15siteconfig_do_siteconfig_gencache () {
16 mkdir -p ${WORKDIR}/site_config_${MACHINE}
17 gen-site-config ${FILE_DIRNAME}/site_config \
18 >${WORKDIR}/site_config_${MACHINE}/configure.ac
19 cd ${WORKDIR}/site_config_${MACHINE}
20 autoconf
21 rm -f ${PN}_cache
22 CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${PN}_cache
23 sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
24 -e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
25 < ${PN}_cache > ${PN}_config
26 mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
27 cp ${PN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
28
29}
30
31do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
32
33EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
new file mode 100644
index 0000000000..8705eaa243
--- /dev/null
+++ b/meta/classes/siteinfo.bbclass
@@ -0,0 +1,149 @@
1# This class exists to provide information about the targets that
2# may be needed by other classes and/or recipes. If you add a new
3# target this will probably need to be updated.
4
5#
6# siteinfo_data(d) returns a list of site information tokens for the
7# current target, "<arch>-<os>", derived from HOST_ARCH and HOST_OS.
8#
9# The tokens describe, amongst other things:
10# * endianness: "endian-little" for little endian, "endian-big" for big endian
11# * bit size: "bit-32" or "bit-64"
12# * C library: e.g. "common-glibc" or "common-uclibc"
13# plus any arch/os/target specific entries, the target name itself
14# and "common".
15#
16# It is an error for the target architecture not to be listed in the
17# tables below; add new targets here as required.
18def siteinfo_data(d):
19 archinfo = {
20 "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
21 "aarch64": "endian-little bit-64 arm-common",
22 "arm": "endian-little bit-32 arm-common",
23 "armeb": "endian-big bit-32 arm-common",
24 "avr32": "endian-big bit-32 avr32-common",
25 "bfin": "endian-little bit-32 bfin-common",
26 "i386": "endian-little bit-32 ix86-common",
27 "i486": "endian-little bit-32 ix86-common",
28 "i586": "endian-little bit-32 ix86-common",
29 "i686": "endian-little bit-32 ix86-common",
30 "ia64": "endian-little bit-64",
31 "microblaze": "endian-big bit-32 microblaze-common",
32 "microblazeel": "endian-little bit-32 microblaze-common",
33 "mips": "endian-big bit-32 mips-common",
34 "mips64": "endian-big bit-64 mips-common",
35 "mips64el": "endian-little bit-64 mips-common",
36 "mipsel": "endian-little bit-32 mips-common",
37 "powerpc": "endian-big bit-32 powerpc-common",
38 "nios2": "endian-little bit-32 nios2-common",
39 "powerpc64": "endian-big bit-64 powerpc-common",
40 "ppc": "endian-big bit-32 powerpc-common",
41 "ppc64": "endian-big bit-64 powerpc-common",
42 "sh3": "endian-little bit-32 sh-common",
43 "sh4": "endian-little bit-32 sh-common",
44 "sparc": "endian-big bit-32",
45 "viac3": "endian-little bit-32 ix86-common",
46 "x86_64": "endian-little", # bitinfo specified in targetinfo
47 }
48 osinfo = {
49 "darwin": "common-darwin",
50 "darwin9": "common-darwin",
51 "linux": "common-linux common-glibc",
52 "linux-gnu": "common-linux common-glibc",
53 "linux-gnux32": "common-linux common-glibc",
54 "linux-gnun32": "common-linux common-glibc",
55 "linux-gnueabi": "common-linux common-glibc",
56 "linux-gnuspe": "common-linux common-glibc",
57 "linux-uclibc": "common-linux common-uclibc",
58 "linux-uclibceabi": "common-linux common-uclibc",
59 "linux-uclibcspe": "common-linux common-uclibc",
60 "uclinux-uclibc": "common-uclibc",
61 "cygwin": "common-cygwin",
62 "mingw32": "common-mingw",
63 }
64 targetinfo = {
65 "aarch64-linux-gnu": "aarch64-linux",
66 "arm-linux-gnueabi": "arm-linux",
67 "arm-linux-uclibceabi": "arm-linux-uclibc",
68 "armeb-linux-gnueabi": "armeb-linux",
69 "armeb-linux-uclibceabi": "armeb-linux-uclibc",
70 "mips64-linux-gnun32": "mips-linux bit-32",
71 "mips64el-linux-gnun32": "mipsel-linux bit-32",
72 "powerpc-linux": "powerpc32-linux",
73 "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
74 "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
75 "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
76 "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
77 "powerpc64-linux": "powerpc-linux",
78 "x86_64-cygwin": "bit-64",
79 "x86_64-darwin": "bit-64",
80 "x86_64-darwin9": "bit-64",
81 "x86_64-linux": "bit-64",
82 "x86_64-linux-uclibc": "bit-64",
83 "x86_64-linux-gnu": "bit-64 x86_64-linux",
84 "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
85 "x86_64-mingw32": "bit-64",
86 }
87
88 hostarch = d.getVar("HOST_ARCH", True)
89 hostos = d.getVar("HOST_OS", True)
90 target = "%s-%s" % (hostarch, hostos)
91
92 sitedata = []
93 if hostarch in archinfo:
94 sitedata.extend(archinfo[hostarch].split())
95 if hostos in osinfo:
96 sitedata.extend(osinfo[hostos].split())
97 if target in targetinfo:
98 sitedata.extend(targetinfo[target].split())
99 sitedata.append(target)
100 sitedata.append("common")
101
102    bb.debug(1, "SITE files %s" % sitedata)
103 return sitedata
104
105python () {
106 sitedata = set(siteinfo_data(d))
107 if "endian-little" in sitedata:
108 d.setVar("SITEINFO_ENDIANNESS", "le")
109 elif "endian-big" in sitedata:
110 d.setVar("SITEINFO_ENDIANNESS", "be")
111 else:
112 bb.error("Unable to determine endianness for architecture '%s'" %
113 d.getVar("HOST_ARCH", True))
114 bb.fatal("Please add your architecture to siteinfo.bbclass")
115
116 if "bit-32" in sitedata:
117 d.setVar("SITEINFO_BITS", "32")
118 elif "bit-64" in sitedata:
119 d.setVar("SITEINFO_BITS", "64")
120 else:
121 bb.error("Unable to determine bit size for architecture '%s'" %
122 d.getVar("HOST_ARCH", True))
123 bb.fatal("Please add your architecture to siteinfo.bbclass")
124}
125
126def siteinfo_get_files(d, no_cache = False):
127 sitedata = siteinfo_data(d)
128 sitefiles = ""
129 for path in d.getVar("BBPATH", True).split(":"):
130 for element in sitedata:
131 filename = os.path.join(path, "site", element)
132 if os.path.exists(filename):
133 sitefiles += filename + " "
134
135 if no_cache: return sitefiles
136
137 # Now check for siteconfig cache files
138 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True)
139 if os.path.isdir(path_siteconfig):
140 for i in os.listdir(path_siteconfig):
141 filename = os.path.join(path_siteconfig, i)
142 sitefiles += filename + " "
143
144 return sitefiles
145
146#
147# Make some information available via variables
148#
149SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
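For a concrete example of what the tables above yield, tracing siteinfo_data() by hand for HOST_ARCH = "mips" and HOST_OS = "linux" (a combination with no targetinfo entry) gives:

    # traced by hand from the tables above; illustrative only
    sitedata = ["endian-big", "bit-32", "mips-common",   # archinfo["mips"]
                "common-linux", "common-glibc",          # osinfo["linux"]
                "mips-linux", "common"]                  # target + fallback
    # the anonymous python() then sets SITEINFO_ENDIANNESS = "be"
    # and SITEINFO_BITS = "32"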
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
new file mode 100644
index 0000000000..55ce3aff4f
--- /dev/null
+++ b/meta/classes/spdx.bbclass
@@ -0,0 +1,321 @@
1# This class integrates real-time license scanning, generation of SPDX standard
2# output and verifying license info during the build process.
3# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
4#
5# For more information on FOSSology:
6# http://www.fossology.org
7#
8# For more information on FOSSologySPDX commandline:
9# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
10#
11# For more information on SPDX:
12# http://www.spdx.org
13#
14
15# The SPDX file will be output to the path defined as [SPDX_MANIFEST_DIR]
16# in ./meta/conf/licenses.conf.
17
18SPDXOUTPUTDIR = "${WORKDIR}/spdx_output_dir"
19SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
20
21python do_spdx () {
22 import os, sys
23 import json
24
25 info = {}
26 info['workdir'] = (d.getVar('WORKDIR', True) or "")
27 info['sourcedir'] = (d.getVar('S', True) or "")
28 info['pn'] = (d.getVar( 'PN', True ) or "")
29 info['pv'] = (d.getVar( 'PV', True ) or "")
30 info['src_uri'] = (d.getVar( 'SRC_URI', True ) or "")
31 info['spdx_version'] = (d.getVar('SPDX_VERSION', True) or '')
32 info['data_license'] = (d.getVar('DATA_LICENSE', True) or '')
33
34 spdx_sstate_dir = (d.getVar('SPDXSSTATEDIR', True) or "")
35 manifest_dir = (d.getVar('SPDX_MANIFEST_DIR', True) or "")
36 info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
37 sstatefile = os.path.join(spdx_sstate_dir,
38 info['pn'] + info['pv'] + ".spdx" )
39 info['spdx_temp_dir'] = (d.getVar('SPDX_TEMP_DIR', True) or "")
40 info['tar_file'] = os.path.join( info['workdir'], info['pn'] + ".tar.gz" )
41
42
43 ## get everything from cache. use it to decide if
44 ## something needs to be rerun
45 cur_ver_code = get_ver_code( info['sourcedir'] )
46 cache_cur = False
47 if not os.path.exists( spdx_sstate_dir ):
48 bb.utils.mkdirhier( spdx_sstate_dir )
49 if not os.path.exists( info['spdx_temp_dir'] ):
50 bb.utils.mkdirhier( info['spdx_temp_dir'] )
51 if os.path.exists( sstatefile ):
52 ## cache for this package exists. read it in
53 cached_spdx = get_cached_spdx( sstatefile )
54
55 if cached_spdx['PackageVerificationCode'] == cur_ver_code:
56            bb.warn(info['pn'] + ": package verification code matches the cached one, nothing to do")
57 cache_cur = True
58 else:
59 local_file_info = setup_foss_scan( info,
60 True, cached_spdx['Files'] )
61 else:
62 local_file_info = setup_foss_scan( info, False, None )
63
64 if cache_cur:
65 spdx_file_info = cached_spdx['Files']
66 else:
67 ## setup fossology command
68 foss_server = (d.getVar('FOSS_SERVER', True) or "")
69 foss_flags = (d.getVar('FOSS_WGET_FLAGS', True) or "")
70 foss_command = "wget %s --post-file=%s %s"\
71 % (foss_flags,info['tar_file'],foss_server)
72
73 #bb.warn(info['pn'] + json.dumps(local_file_info))
74 foss_file_info = run_fossology( foss_command )
75 spdx_file_info = create_spdx_doc( local_file_info, foss_file_info )
76 ## write to cache
77 write_cached_spdx(sstatefile,cur_ver_code,spdx_file_info)
78
79 ## Get document and package level information
80 spdx_header_info = get_header_info(info, cur_ver_code, spdx_file_info)
81
82 ## CREATE MANIFEST
83 create_manifest(info,spdx_header_info,spdx_file_info)
84
85 ## clean up the temp stuff
86 remove_dir_tree( info['spdx_temp_dir'] )
87 if os.path.exists(info['tar_file']):
88 remove_file( info['tar_file'] )
89}
90addtask spdx after do_patch before do_configure
91
92def create_manifest(info,header,files):
93 with open(info['outfile'], 'w') as f:
94 f.write(header + '\n')
95 for chksum, block in files.iteritems():
96 for key, value in block.iteritems():
97 f.write(key + ": " + value)
98 f.write('\n')
99 f.write('\n')
100
101def get_cached_spdx( sstatefile ):
102 import json
103 cached_spdx_info = {}
104 with open( sstatefile, 'r' ) as f:
105 try:
106 cached_spdx_info = json.load(f)
107 except ValueError as e:
108 cached_spdx_info = None
109 return cached_spdx_info
110
111def write_cached_spdx( sstatefile, ver_code, files ):
112 import json
113 spdx_doc = {}
114 spdx_doc['PackageVerificationCode'] = ver_code
115 spdx_doc['Files'] = {}
116 spdx_doc['Files'] = files
117 with open( sstatefile, 'w' ) as f:
118 f.write(json.dumps(spdx_doc))
119
120def setup_foss_scan( info, cache, cached_files ):
121    import errno, shutil, time
122 import tarfile
123 file_info = {}
124 cache_dict = {}
125
126 for f_dir, f in list_files( info['sourcedir'] ):
127 full_path = os.path.join( f_dir, f )
128 abs_path = os.path.join(info['sourcedir'], full_path)
129 dest_dir = os.path.join( info['spdx_temp_dir'], f_dir )
130 dest_path = os.path.join( info['spdx_temp_dir'], full_path )
131 try:
132 stats = os.stat(abs_path)
133 except OSError as e:
134            bb.warn( "Stat failed: " + str(e) + "\n")
135 continue
136
137 checksum = hash_file( abs_path )
138 mtime = time.asctime(time.localtime(stats.st_mtime))
139
140 ## retain cache information if it exists
141 file_info[checksum] = {}
142 if cache and checksum in cached_files:
143 file_info[checksum] = cached_files[checksum]
144 else:
145 file_info[checksum]['FileName'] = full_path
146
147 try:
148 os.makedirs( dest_dir )
149 except OSError as e:
150 if e.errno == errno.EEXIST and os.path.isdir(dest_dir):
151 pass
152 else:
153 bb.warn( "mkdir failed " + str(e) + "\n" )
154 continue
155
156        if (cache and checksum not in cached_files) or not cache:
157 try:
158 shutil.copyfile( abs_path, dest_path )
159 except shutil.Error as e:
160 bb.warn( str(e) + "\n" )
161 except IOError as e:
162 bb.warn( str(e) + "\n" )
163
164 with tarfile.open( info['tar_file'], "w:gz" ) as tar:
165 tar.add( info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']) )
166        # the context manager closes the archive on exit
167
168 return file_info
169
170
171def remove_dir_tree( dir_name ):
172 import shutil
173 try:
174 shutil.rmtree( dir_name )
175 except:
176 pass
177
178def remove_file( file_name ):
179 try:
180 os.remove( file_name )
181 except OSError as e:
182 pass
183
184def list_files( dir ):
185 for root, subFolders, files in os.walk( dir ):
186 for f in files:
187 rel_root = os.path.relpath( root, dir )
188 yield rel_root, f
189 return
190
191def hash_file( file_name ):
192    try:
193        # use a context manager so the file is always closed and f is
194        # never referenced when open() itself fails
195        with open( file_name, 'rb' ) as f:
196            data_string = f.read()
197    except IOError:
198        return None
199 sha1 = hash_string( data_string )
200 return sha1
201
202def hash_string( data ):
203 import hashlib
204 sha1 = hashlib.sha1()
205 sha1.update( data )
206 return sha1.hexdigest()
207
208def run_fossology( foss_command ):
209 import string, re
210 import subprocess
211
212 p = subprocess.Popen(foss_command.split(),
213 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
214 foss_output, foss_error = p.communicate()
215
216 records = []
217 records = re.findall('FileName:.*?</text>', foss_output, re.S)
218
219 file_info = {}
220 for rec in records:
221 rec = string.replace( rec, '\r', '' )
222 chksum = re.findall( 'FileChecksum: SHA1: (.*)\n', rec)[0]
223 file_info[chksum] = {}
224 file_info[chksum]['FileCopyrightText'] = re.findall( 'FileCopyrightText: '
225 + '(.*?</text>)', rec, re.S )[0]
226 fields = ['FileType','LicenseConcluded',
227 'LicenseInfoInFile','FileName']
228 for field in fields:
229 file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
230
231 return file_info
232
233def create_spdx_doc( file_info, scanned_files ):
234 import json
235 ## push foss changes back into cache
236 for chksum, lic_info in scanned_files.iteritems():
237 if chksum in file_info:
238            ## FileName is already set from the local file scan; keep it as-is
239 file_info[chksum]['FileType'] = lic_info['FileType']
240 file_info[chksum]['FileChecksum: SHA1'] = chksum
241 file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
242 file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
243 file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
244 else:
245 bb.warn(lic_info['FileName'] + " : " + chksum
246 + " : is not in the local file info: "
247 + json.dumps(lic_info,indent=1))
248 return file_info
249
250def get_ver_code( dirname ):
251 chksums = []
252 for f_dir, f in list_files( dirname ):
253 try:
254 stats = os.stat(os.path.join(dirname,f_dir,f))
255 except OSError as e:
256            bb.warn( "Stat failed: " + str(e) + "\n")
257 continue
258 chksums.append(hash_file(os.path.join(dirname,f_dir,f)))
259 ver_code_string = ''.join( chksums ).lower()
260 ver_code = hash_string( ver_code_string )
261 return ver_code
262
263def get_header_info( info, spdx_verification_code, spdx_files ):
264 """
265 Put together the header SPDX information.
266 Eventually this needs to become a lot less
267 of a hardcoded thing.
268 """
269 from datetime import datetime
270 import os
271 head = []
272 DEFAULT = "NOASSERTION"
273
274 #spdx_verification_code = get_ver_code( info['sourcedir'] )
275 package_checksum = ''
276 if os.path.exists(info['tar_file']):
277 package_checksum = hash_file( info['tar_file'] )
278 else:
279 package_checksum = DEFAULT
280
281 ## document level information
282 head.append("SPDXVersion: " + info['spdx_version'])
283 head.append("DataLicense: " + info['data_license'])
284 head.append("DocumentComment: <text>SPDX for "
285 + info['pn'] + " version " + info['pv'] + "</text>")
286 head.append("")
287
288 ## Creator information
289 now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
290 head.append("## Creation Information")
291 head.append("Creator: fossology-spdx")
292 head.append("Created: " + now)
293 head.append("CreatorComment: <text>UNO</text>")
294 head.append("")
295
296 ## package level information
297 head.append("## Package Information")
298 head.append("PackageName: " + info['pn'])
299 head.append("PackageVersion: " + info['pv'])
300 head.append("PackageDownloadLocation: " + DEFAULT)
301 head.append("PackageSummary: <text></text>")
302 head.append("PackageFileName: " + os.path.basename(info['tar_file']))
303 head.append("PackageSupplier: Person:" + DEFAULT)
304 head.append("PackageOriginator: Person:" + DEFAULT)
305 head.append("PackageChecksum: SHA1: " + package_checksum)
306 head.append("PackageVerificationCode: " + spdx_verification_code)
307 head.append("PackageDescription: <text>" + info['pn']
308 + " version " + info['pv'] + "</text>")
309 head.append("")
310 head.append("PackageCopyrightText: <text>" + DEFAULT + "</text>")
311 head.append("")
312 head.append("PackageLicenseDeclared: " + DEFAULT)
313 head.append("PackageLicenseConcluded: " + DEFAULT)
314 head.append("PackageLicenseInfoFromFiles: " + DEFAULT)
315 head.append("")
316
317 ## header for file level
318 head.append("## File Information")
319 head.append("")
320
321 return '\n'.join(head)
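The package verification code compared against the cache above is just the SHA1 of the concatenated, lower-cased per-file SHA1 digests. A minimal standalone sketch of the same computation (the function name is ours, not part of the class):

    import hashlib

    def verification_code(file_sha1s):
        # mirrors get_ver_code()/hash_string(): join the per-file hex
        # digests, lower-case the result and hash that string again
        joined = ''.join(file_sha1s).lower()
        return hashlib.sha1(joined.encode('ascii')).hexdigest()

    print(verification_code(["aa" * 20, "bb" * 20]))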
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
new file mode 100644
index 0000000000..517c1001d2
--- /dev/null
+++ b/meta/classes/sstate.bbclass
@@ -0,0 +1,737 @@
1SSTATE_VERSION = "3"
2
3SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
4SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
5
6def generate_sstatefn(spec, hash, d):
7 if not hash:
8 hash = "INVALID"
9 return hash[:2] + "/" + spec + hash
10
11SSTATE_PKGARCH = "${PACKAGE_ARCH}"
12SSTATE_PKGSPEC = "sstate-${PN}-${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}-${PV}-${PR}-${SSTATE_PKGARCH}-${SSTATE_VERSION}-"
13SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
14SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
15SSTATE_EXTRAPATH = ""
16SSTATE_EXTRAPATHWILDCARD = ""
17SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
18
19SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/"
20# Also need to make cross recipes append to ${PN} and install once for any given PACKAGE_ARCH so
21# we can avoid multiple installs (e.g. routerstationpro+qemumips both using mips32)
22SSTATE_DUPWHITELIST += "${STAGING_LIBDIR_NATIVE}/${MULTIMACH_TARGET_SYS} ${STAGING_DIR_NATIVE}/usr/libexec/${MULTIMACH_TARGET_SYS} ${STAGING_BINDIR_NATIVE}/${MULTIMACH_TARGET_SYS} ${STAGING_DIR_NATIVE}${includedir_native}/gcc-build-internal-${MULTIMACH_TARGET_SYS}"
23# Avoid docbook/sgml catalog warnings for now
24SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
25
26SSTATE_SCAN_FILES ?= "*.la *-config *_config"
27SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
28
29BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC}"
30
31SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
32
33SSTATEPREINSTFUNCS ?= ""
34SSTATEPOSTINSTFUNCS ?= ""
35EXTRA_STAGING_FIXMES ?= ""
36
37# Specify dirs in which the shell functions are executed and don't use ${B}
38# as the default, to avoid possible races over ${B} with other tasks.
39sstate_create_package[dirs] = "${SSTATE_BUILDDIR}"
40sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}"
41
42python () {
43 if bb.data.inherits_class('native', d):
44 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH'))
45 elif bb.data.inherits_class('crosssdk', d):
46 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}"))
47 elif bb.data.inherits_class('cross', d):
48 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TUNE_PKGARCH}"))
49 d.setVar('SSTATE_MANMACH', d.expand("${BUILD_ARCH}_${MACHINE}"))
50 elif bb.data.inherits_class('nativesdk', d):
51 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}"))
52 elif bb.data.inherits_class('cross-canadian', d):
53 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
54 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
55 d.setVar('SSTATE_PKGARCH', "allarch")
56 else:
57 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
58
59 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
60 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
61 d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
62
63    # These classes encode staging paths into their scripts data so they can
64    # only be reused if we manipulate the paths
65 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
66 scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
67 d.setVar('SSTATE_SCAN_CMD', scan_cmd)
68
69 unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
70 d.setVar('SSTATETASKS', " ".join(unique_tasks))
71 namemap = []
72 for task in unique_tasks:
73 namemap.append(d.getVarFlag(task, 'sstate-name'))
74 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
75 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
76 d.setVar('SSTATETASKNAMES', " ".join(namemap))
77}
78
79def sstate_init(name, task, d):
80 ss = {}
81 ss['task'] = task
82 ss['name'] = name
83 ss['dirs'] = []
84 ss['plaindirs'] = []
85 ss['lockfiles'] = []
86 ss['lockfiles-shared'] = []
87 return ss
88
89def sstate_state_fromvars(d, task = None):
90 if task is None:
91 task = d.getVar('BB_CURRENTTASK', True)
92 if not task:
93 bb.fatal("sstate code running without task context?!")
94 task = task.replace("_setscene", "")
95
96 name = d.getVarFlag("do_" + task, 'sstate-name', True)
97 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
98 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
99 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
100 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
101 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
102 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
103 if not name or len(inputs) != len(outputs):
104 bb.fatal("sstate variables not setup correctly?!")
105
106 ss = sstate_init(name, task, d)
107 for i in range(len(inputs)):
108 sstate_add(ss, inputs[i], outputs[i], d)
109 ss['lockfiles'] = lockfiles
110 ss['lockfiles-shared'] = lockfilesshared
111 ss['plaindirs'] = plaindirs
112 ss['interceptfuncs'] = interceptfuncs
113 return ss
114
115def sstate_add(ss, source, dest, d):
116 if not source.endswith("/"):
117 source = source + "/"
118 if not dest.endswith("/"):
119 dest = dest + "/"
120 source = os.path.normpath(source)
121 dest = os.path.normpath(dest)
122 srcbase = os.path.basename(source)
123 ss['dirs'].append([srcbase, source, dest])
124 return ss
125
126def sstate_install(ss, d):
127 import oe.path
128 import subprocess
129
130 sharedfiles = []
131 shareddirs = []
132 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
133
134 d2 = d.createCopy()
135 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
136 if extrainf:
137 d2.setVar("SSTATE_MANMACH", extrainf)
138 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['name'])
139
140 if os.access(manifest, os.R_OK):
141 bb.fatal("Package already staged (%s)?!" % manifest)
142
143 locks = []
144 for lock in ss['lockfiles-shared']:
145 locks.append(bb.utils.lockfile(lock, True))
146 for lock in ss['lockfiles']:
147 locks.append(bb.utils.lockfile(lock))
148
149 for state in ss['dirs']:
150 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
151 for walkroot, dirs, files in os.walk(state[1]):
152 for file in files:
153 srcpath = os.path.join(walkroot, file)
154 dstpath = srcpath.replace(state[1], state[2])
155 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
156 sharedfiles.append(dstpath)
157 for dir in dirs:
158 srcdir = os.path.join(walkroot, dir)
159 dstdir = srcdir.replace(state[1], state[2])
160 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
161 if not dstdir.endswith("/"):
162 dstdir = dstdir + "/"
163 shareddirs.append(dstdir)
164
165 # Check the file list for conflicts against files which already exist
166 whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
167 match = []
168 for f in sharedfiles:
169 if os.path.exists(f):
170 f = os.path.normpath(f)
171 realmatch = True
172 for w in whitelist:
173 if f.startswith(w):
174 realmatch = False
175 break
176 if realmatch:
177 match.append(f)
178 sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
179 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
180 if search_output != "":
181 match.append("Matched in %s" % search_output.rstrip())
182 if match:
183 bb.warn("The recipe %s is trying to install files into a shared area when those files already exist. Those files and their manifest location are:\n %s\nPlease verify which package should provide the above files." % (d.getVar('PN', True), "\n ".join(match)))
184
185 # Write out the manifest
186 f = open(manifest, "w")
187 for file in sharedfiles:
188 f.write(file + "\n")
189
190 # We want to ensure that directories appear at the end of the manifest
191 # so that when we test to see if they should be deleted any contents
192 # added by the task will have been removed first.
193 dirs = sorted(shareddirs, key=len)
194 # Must remove children first, which will have a longer path than the parent
195 for di in reversed(dirs):
196 f.write(di + "\n")
197 f.close()
198
199 # Run the actual file install
200 for state in ss['dirs']:
201 if os.path.exists(state[1]):
202 oe.path.copyhardlinktree(state[1], state[2])
203
204 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
205 bb.build.exec_func(postinst, d)
206
207 for lock in locks:
208 bb.utils.unlockfile(lock)
209
210def sstate_installpkg(ss, d):
211 import oe.path
212 import subprocess
213
214 def prepdir(dir):
215 # remove dir if it exists, ensure any parent directories do exist
216 if os.path.exists(dir):
217 oe.path.remove(dir)
218 bb.utils.mkdirhier(dir)
219 oe.path.remove(dir)
220
221 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['name'])
222 sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['name'] + ".tgz"
223 sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['name'] + ".tgz"
224
225 if not os.path.exists(sstatepkg):
226 pstaging_fetch(sstatefetch, sstatepkg, d)
227
228 if not os.path.isfile(sstatepkg):
229 bb.note("Staging package %s does not exist" % sstatepkg)
230 return False
231
232 sstate_clean(ss, d)
233
234 d.setVar('SSTATE_INSTDIR', sstateinst)
235 d.setVar('SSTATE_PKG', sstatepkg)
236
237 for preinst in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split():
238 bb.build.exec_func(preinst, d)
239
240 bb.build.exec_func('sstate_unpack_package', d)
241
242 # Fixup hardcoded paths
243 #
244 # Note: The logic below must match the reverse logic in
245 # sstate_hardcode_path(d)
246
247 fixmefn = sstateinst + "fixmepath"
248 if os.path.isfile(fixmefn):
249 staging = d.getVar('STAGING_DIR', True)
250 staging_target = d.getVar('STAGING_DIR_TARGET', True)
251 staging_host = d.getVar('STAGING_DIR_HOST', True)
252
253 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
254 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
255 elif bb.data.inherits_class('cross', d):
256 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
257 else:
258 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
259
260 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
261 for fixmevar in extra_staging_fixmes.split():
262 fixme_path = d.getVar(fixmevar, True)
263 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
264
265 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
266 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
267
268 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
269 subprocess.call(sstate_hardcode_cmd, shell=True)
270
271 # Need to remove this or we'd copy it into the target directory and may
272 # conflict with another writer
273 os.remove(fixmefn)
274
275 for state in ss['dirs']:
276 prepdir(state[1])
277 os.rename(sstateinst + state[0], state[1])
278 sstate_install(ss, d)
279
280 for plain in ss['plaindirs']:
281 workdir = d.getVar('WORKDIR', True)
282 src = sstateinst + "/" + plain.replace(workdir, '')
283 dest = plain
284 bb.utils.mkdirhier(src)
285 prepdir(dest)
286 os.rename(src, dest)
287
288 return True
289
290def sstate_clean_cachefile(ss, d):
291 import oe.path
292
293 sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['name'] + ".tgz*"
294 bb.note("Removing %s" % sstatepkgfile)
295 oe.path.remove(sstatepkgfile)
296
297def sstate_clean_cachefiles(d):
298 for task in (d.getVar('SSTATETASKS', True) or "").split():
299 ss = sstate_state_fromvars(d, task[3:])
300 sstate_clean_cachefile(ss, d)
301
302def sstate_clean_manifest(manifest, d):
303 import oe.path
304
305 mfile = open(manifest)
306 entries = mfile.readlines()
307 mfile.close()
308
309 for entry in entries:
310 entry = entry.strip()
311 bb.debug(2, "Removing manifest: %s" % entry)
312 # We can race against another package populating directories as we're removing them
313 # so we ignore errors here.
314 try:
315 if entry.endswith("/"):
316 if os.path.islink(entry[:-1]):
317 os.remove(entry[:-1])
318 elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
319 os.rmdir(entry[:-1])
320 else:
321 oe.path.remove(entry)
322 except OSError:
323 pass
324
325 oe.path.remove(manifest)
326
327def sstate_clean(ss, d):
328 import oe.path
329
330 d2 = d.createCopy()
331 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
332 if extrainf:
333 d2.setVar("SSTATE_MANMACH", extrainf)
334 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['name'])
335
336 if os.path.exists(manifest):
337 locks = []
338 for lock in ss['lockfiles-shared']:
339 locks.append(bb.utils.lockfile(lock))
340 for lock in ss['lockfiles']:
341 locks.append(bb.utils.lockfile(lock))
342
343 sstate_clean_manifest(manifest, d)
344
345 for lock in locks:
346 bb.utils.unlockfile(lock)
347
348 stfile = d.getVar("STAMP", True) + ".do_" + ss['task']
349 oe.path.remove(stfile)
350 oe.path.remove(stfile + "_setscene")
351 if extrainf:
352 oe.path.remove(stfile + ".*" + extrainf)
353 oe.path.remove(stfile + "_setscene" + ".*" + extrainf)
354 else:
355 oe.path.remove(stfile + ".*")
356 oe.path.remove(stfile + "_setscene" + ".*")
357
358CLEANFUNCS += "sstate_cleanall"
359
360python sstate_cleanall() {
361 bb.note("Removing shared state for package %s" % d.getVar('PN', True))
362
363 manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
364 if not os.path.exists(manifest_dir):
365 return
366
367 namemap = d.getVar('SSTATETASKNAMES', True).split()
368 tasks = d.getVar('SSTATETASKS', True).split()
369 for name in namemap:
370 taskname = tasks[namemap.index(name)]
371 shared_state = sstate_state_fromvars(d, taskname[3:])
372 sstate_clean(shared_state, d)
373}
374
375def sstate_hardcode_path(d):
376 import subprocess, platform
377
378 # Need to remove hardcoded paths and fix these when we install the
379 # staging packages.
380 #
381 # Note: the logic in this function needs to match the reverse logic
382 # in sstate_installpkg(ss, d)
383
384 staging = d.getVar('STAGING_DIR', True)
385 staging_target = d.getVar('STAGING_DIR_TARGET', True)
386 staging_host = d.getVar('STAGING_DIR_HOST', True)
387 sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
388
389 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
390 sstate_grep_cmd = "grep -l -e '%s'" % (staging)
391 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
392 elif bb.data.inherits_class('cross', d):
393 sstate_grep_cmd = "grep -l -e '(%s|%s)'" % (staging_target, staging)
394 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
395 else:
396 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
397 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
398
399 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
400 for fixmevar in extra_staging_fixmes.split():
401 fixme_path = d.getVar(fixmevar, True)
402 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
403
404 fixmefn = sstate_builddir + "fixmepath"
405
406 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
407 sstate_filelist_cmd = "tee %s" % (fixmefn)
408
409 # fixmepath file needs relative paths, drop sstate_builddir prefix
410 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
411
412 xargs_no_empty_run_cmd = '--no-run-if-empty'
413 if platform.system() == 'Darwin':
414 xargs_no_empty_run_cmd = ''
415
416 # Limit the fixpaths and sed operations based on the initial grep search
417 # This has the side effect of making sure the vfs cache is hot
418 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
419
420 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
421 subprocess.call(sstate_hardcode_cmd, shell=True)
422
423    # If the fixmepath file is empty, remove it
424 if os.stat(fixmefn).st_size == 0:
425 os.remove(fixmefn)
426 else:
427 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
428 subprocess.call(sstate_filelist_relative_cmd, shell=True)
429
430def sstate_package(ss, d):
431 import oe.path
432
433 def make_relative_symlink(path, outputpath, d):
434        # Replace absolute TMPDIR paths in symlinks with relative ones
435 if not os.path.islink(path):
436 return
437 link = os.readlink(path)
438 if not os.path.isabs(link):
439 return
440 if not link.startswith(tmpdir):
441 return
442
443 depth = outputpath.rpartition(tmpdir)[2].count('/')
444 base = link.partition(tmpdir)[2].strip()
445 while depth > 1:
446 base = "/.." + base
447 depth -= 1
448 base = "." + base
449
450 bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
451 os.remove(path)
452 os.symlink(base, path)
453
454 tmpdir = d.getVar('TMPDIR', True)
455
456 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['name'])
457 sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['name'] + ".tgz"
458 bb.utils.remove(sstatebuild, recurse=True)
459 bb.utils.mkdirhier(sstatebuild)
460 bb.utils.mkdirhier(os.path.dirname(sstatepkg))
461 for state in ss['dirs']:
462 if not os.path.exists(state[1]):
463 continue
464 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
465 for walkroot, dirs, files in os.walk(state[1]):
466 for file in files:
467 srcpath = os.path.join(walkroot, file)
468 dstpath = srcpath.replace(state[1], state[2])
469 make_relative_symlink(srcpath, dstpath, d)
470 for dir in dirs:
471 srcpath = os.path.join(walkroot, dir)
472 dstpath = srcpath.replace(state[1], state[2])
473 make_relative_symlink(srcpath, dstpath, d)
474 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
475 oe.path.copyhardlinktree(state[1], sstatebuild + state[0])
476
477 workdir = d.getVar('WORKDIR', True)
478 for plain in ss['plaindirs']:
479 pdir = plain.replace(workdir, sstatebuild)
480 bb.utils.mkdirhier(plain)
481 bb.utils.mkdirhier(pdir)
482 oe.path.copyhardlinktree(plain, pdir)
483
484 d.setVar('SSTATE_BUILDDIR', sstatebuild)
485 d.setVar('SSTATE_PKG', sstatepkg)
486 sstate_hardcode_path(d)
487 bb.build.exec_func('sstate_create_package', d)
488
489 bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
490
491 return
492
493def pstaging_fetch(sstatefetch, sstatepkg, d):
494 import bb.fetch2
495
496 # Only try and fetch if the user has configured a mirror
497 mirrors = d.getVar('SSTATE_MIRRORS', True)
498 if not mirrors:
499 return
500
501 # Copy the data object and override DL_DIR and SRC_URI
502 localdata = bb.data.createCopy(d)
503 bb.data.update_data(localdata)
504
505 dldir = localdata.expand("${SSTATE_DIR}")
506 bb.utils.mkdirhier(dldir)
507
508 localdata.delVar('MIRRORS')
509 localdata.delVar('FILESPATH')
510 localdata.setVar('DL_DIR', dldir)
511 localdata.setVar('PREMIRRORS', mirrors)
512
513 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
514 # we'll want to allow network access for the current set of fetches.
515 if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
516 localdata.delVar('BB_NO_NETWORK')
517
518 # Try a fetch from the sstate mirror, if it fails just return and
519 # we will build the package
520 for srcuri in ['file://{0}'.format(sstatefetch),
521 'file://{0}.siginfo'.format(sstatefetch)]:
522 localdata.setVar('SRC_URI', srcuri)
523 try:
524 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
525 fetcher.download()
526
527 # Need to optimise this, if using file:// urls, the fetcher just changes the local path
528 # For now work around by symlinking
529 localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
530 if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
531 os.symlink(localpath, sstatepkg)
532
533 except bb.fetch2.BBFetchException:
534 break
535
536def sstate_setscene(d):
537 shared_state = sstate_state_fromvars(d)
538 accelerate = sstate_installpkg(shared_state, d)
539 if not accelerate:
540 raise bb.build.FuncFailed("No suitable staging package found")
541
542python sstate_task_prefunc () {
543 shared_state = sstate_state_fromvars(d)
544 sstate_clean(shared_state, d)
545}
546
547python sstate_task_postfunc () {
548 shared_state = sstate_state_fromvars(d)
549 sstate_install(shared_state, d)
550 for intercept in shared_state['interceptfuncs']:
551 bb.build.exec_func(intercept, d)
552 omask = os.umask(002)
553 if omask != 002:
554 bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
555 sstate_package(shared_state, d)
556 os.umask(omask)
557}
558
559
560#
561# Shell function to generate a sstate package from a directory
562# set as SSTATE_BUILDDIR
563#
564sstate_create_package () {
565 cd ${SSTATE_BUILDDIR}
566 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
567 # Need to handle empty directories
568 if [ "$(ls -A)" ]; then
569 set +e
570 tar -czf $TFILE *
571 if [ $? -ne 0 ] && [ $? -ne 1 ]; then
572 exit 1
573 fi
574 set -e
575 else
576 tar -cz --file=$TFILE --files-from=/dev/null
577 fi
578 chmod 0664 $TFILE
579 mv -f $TFILE ${SSTATE_PKG}
580
581 cd ${WORKDIR}
582 rm -rf ${SSTATE_BUILDDIR}
583}
584
585#
586# Shell function to decompress and prepare a package for installation
587#
588sstate_unpack_package () {
589 mkdir -p ${SSTATE_INSTDIR}
590 cd ${SSTATE_INSTDIR}
591 tar -xmvzf ${SSTATE_PKG}
592}
593
594# Need to inject information about classes not in the global configuration scope
595EXTRASSTATEMAPS += "do_deploy:deploy"
596
597BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
598
599def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d):
600
601 ret = []
602 mapping = {}
603 for t in d.getVar("SSTATETASKS", True).split():
604 mapping[t] = d.getVarFlag(t, "sstate-name", True)
605 for extra in d.getVar("EXTRASSTATEMAPS", True).split():
606 e = extra.split(":")
607 mapping[e[0]] = e[1]
608
609 for task in range(len(sq_fn)):
610 spec = sq_hashfn[task].split(" ")[1]
611 extrapath = sq_hashfn[task].split(" ")[0]
612
613 sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + mapping[sq_task[task]] + ".tgz")
614 if os.path.exists(sstatefile):
615 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
616 ret.append(task)
617 continue
618 else:
619 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
620
621 mirrors = d.getVar("SSTATE_MIRRORS", True)
622 if mirrors:
623 # Copy the data object and override DL_DIR and SRC_URI
624 localdata = bb.data.createCopy(d)
625 bb.data.update_data(localdata)
626
627 dldir = localdata.expand("${SSTATE_DIR}")
628 localdata.setVar('DL_DIR', dldir)
629 localdata.setVar('PREMIRRORS', mirrors)
630
631 bb.debug(2, "SState using premirror of: %s" % mirrors)
632
633 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
634 # we'll want to allow network access for the current set of fetches.
635 if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
636 localdata.delVar('BB_NO_NETWORK')
637
638 for task in range(len(sq_fn)):
639 if task in ret:
640 continue
641
642 spec = sq_hashfn[task].split(" ")[1]
643 extrapath = sq_hashfn[task].split(" ")[0]
644 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + mapping[sq_task[task]] + ".tgz")
645
646 srcuri = "file://" + sstatefile
647 localdata.setVar('SRC_URI', srcuri)
648 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
649
650 try:
651 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata)
652 fetcher.checkstatus()
653 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
654 ret.append(task)
655 except:
656 bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
657 pass
658
659 return ret
660
661BB_SETSCENE_DEPVALID = "setscene_depvalid"
662
663def setscene_depvalid(task, taskdependees, notneeded, d):
664 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
665 # task is included in taskdependees too
666
667 bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))
668
669 def isNative(x):
670 return x.endswith("-native")
671 def isNativeCross(x):
672 return x.endswith("-native") or x.endswith("-cross") or x.endswith("-cross-initial")
673 def isSafeDep(x):
674 if x in ["quilt-native", "autoconf-native", "automake-native", "gnu-config-native", "libtool-native", "pkgconfig-native", "gcc-cross", "binutils-cross", "gcc-cross-initial"]:
675 return True
676 return False
677 def isPostInstDep(x):
678 if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-update-icon-cache-native"]:
679 return True
680 return False
681
682    # We can skip these "safe" dependencies since they aren't runtime dependencies, just build time ones
683 if isSafeDep(taskdependees[task][0]) and taskdependees[task][1] == "do_populate_sysroot":
684 return True
685
686 # We only need to trigger populate_lic through direct dependencies
687 if taskdependees[task][1] == "do_populate_lic":
688 return True
689
690 for dep in taskdependees:
691 bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
692 if task == dep:
693 continue
694 if dep in notneeded:
695 continue
696        # do_package_write_* and do_package don't need do_package
697 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
698 continue
699        # do_package_write_* and do_package don't need do_populate_sysroot, unless it is a postinstall dependency
700 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
701 if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
702 return False
703 continue
704 # Native/Cross packages don't exist and are noexec anyway
705 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
706 continue
707
708 # Consider sysroot depending on sysroot tasks
709 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
710 # base-passwd/shadow-sysroot don't need their dependencies
711 if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
712 continue
713            # Nothing needs to depend on libc-initial/gcc-cross-initial
714 if taskdependees[task][0].endswith("-initial"):
715 continue
716 # Native/Cross populate_sysroot need their dependencies
717 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
718 return False
719 # Target populate_sysroot depended on by cross tools need to be installed
720 if isNativeCross(taskdependees[dep][0]):
721 return False
722 # Native/cross tools depended upon by target sysroot are not needed
723 if isNativeCross(taskdependees[task][0]):
724 continue
725 # Target populate_sysroot need their dependencies
726 return False
727
728 # This is due to the [depends] in useradd.bbclass complicating matters
729 # The logic *is* reversed here due to the way hard setscene dependencies are injected
730 if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
731 continue
732
733 # Safe fallthrough default
734 bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
735 return False
736 return True
737
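Sstate archives end up in a two-level directory layout: generate_sstatefn() above prefixes the package spec with the first two characters of the task hash. A hand-traced illustration, with made-up spec and hash values:

    # spec and hash are invented for illustration
    # generate_sstatefn("sstate-zlib-i586-poky-linux-1.2.7-r0-i586-3-", "0123abcd", d)
    # -> "01/sstate-zlib-i586-poky-linux-1.2.7-r0-i586-3-0123abcd"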
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
new file mode 100644
index 0000000000..ae1c546ad6
--- /dev/null
+++ b/meta/classes/staging.bbclass
@@ -0,0 +1,113 @@
1
2sysroot_stage_dir() {
3 src="$1"
4 dest="$2"
5 # if the src doesn't exist don't do anything
6 if [ ! -d "$src" ]; then
7 return
8 fi
9
10 # We only want to stage the contents of $src if it's non-empty so first rmdir $src
11 # then if it still exists (rmdir on non-empty dir fails) we can copy its contents
12 rmdir "$src" 2> /dev/null || true
13    # However we always want to stage $src itself, even if it's empty
14 mkdir -p "$dest"
15 if [ -d "$src" ]; then
16 tar -cf - -C "$src" -p . | tar -xf - -C "$dest"
17 fi
18}
19
20sysroot_stage_libdir() {
21 src="$1"
22 dest="$2"
23
24 sysroot_stage_dir $src $dest
25}
26
27sysroot_stage_dirs() {
28 from="$1"
29 to="$2"
30
31 sysroot_stage_dir $from${includedir} $to${includedir}
32 if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
33 sysroot_stage_dir $from${bindir} $to${bindir}
34 sysroot_stage_dir $from${sbindir} $to${sbindir}
35 sysroot_stage_dir $from${base_bindir} $to${base_bindir}
36 sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
37 sysroot_stage_dir $from${libexecdir} $to${libexecdir}
38 sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
39 sysroot_stage_dir $from${localstatedir} $to${localstatedir}
40 fi
41 if [ -d $from${libdir} ]
42 then
43        sysroot_stage_libdir $from${libdir} $to${libdir}
44 fi
45 if [ -d $from${base_libdir} ]
46 then
47 sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
48 fi
49 if [ -d $from${nonarch_base_libdir} ]
50 then
51 sysroot_stage_libdir $from${nonarch_base_libdir} $to${nonarch_base_libdir}
52 fi
53 sysroot_stage_dir $from${datadir} $to${datadir}
54 # We don't care about docs/info/manpages/locales
55 rm -rf $to${mandir}/ $to${docdir}/ $to${infodir}/ ${to}${datadir}/locale/
56 rm -rf $to${datadir}/applications/ $to${datadir}/fonts/ $to${datadir}/pixmaps/
57}
58
59sysroot_stage_all() {
60 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
61}
62
63do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
64do_populate_sysroot[umask] = "022"
65
66addtask populate_sysroot after do_install
67
68SYSROOT_PREPROCESS_FUNCS ?= ""
69SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/"
70SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
71
72# We clean out any existing sstate from the sysroot if we rerun configure
73python sysroot_cleansstate () {
74 ss = sstate_state_fromvars(d, "populate_sysroot")
75 sstate_clean(ss, d)
76}
77do_configure[prefuncs] += "sysroot_cleansstate"
78
79
80BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
81
82def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
83 problems = set()
84 configurefnids = set()
85 if not invalidtasks:
86 invalidtasks = xrange(len(tasknames))
87 for task in invalidtasks:
88 if tasknames[task] == "do_configure" and task not in covered:
89 configurefnids.add(fnids[task])
90 for task in covered:
91 if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
92 problems.add(task)
93 return problems
94
95python do_populate_sysroot () {
96 bb.build.exec_func("sysroot_stage_all", d)
97 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
98 bb.build.exec_func(f, d)
99}
100
101SSTATETASKS += "do_populate_sysroot"
102do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
103do_populate_sysroot[sstate-name] = "populate-sysroot"
104do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
105do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
106do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
107
108python do_populate_sysroot_setscene () {
109 sstate_setscene(d)
110}
111addtask do_populate_sysroot_setscene
112
113
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
new file mode 100644
index 0000000000..944bd92da2
--- /dev/null
+++ b/meta/classes/syslinux.bbclass
@@ -0,0 +1,181 @@
1# syslinux.bbclass
2# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
3# Released under the MIT license (see packages/COPYING)
4
5# Provide syslinux specific functions for building bootable images.
6
7# External variables
8# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
9# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
10# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
11# ${LABELS} - a list of targets for the automatic config
12# ${APPEND} - an override list of append strings for each label
13# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
14# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu
15# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
16# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
17# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
18
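# A minimal sketch of how an image recipe might drive this class (the values
# are illustrative only, not defaults of this class):
#
#   LABELS = "boot install"
#   APPEND = "console=ttyS0,115200 rootwait"
#   AUTO_SYSLINUXMENU = "1"
#   SYSLINUX_TIMEOUT = "100"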
19do_bootimg[depends] += "syslinux:do_populate_sysroot \
20 syslinux-native:do_populate_sysroot"
21
22SYSLINUXCFG = "${S}/syslinux.cfg"
23
24ISOLINUXDIR = "/isolinux"
25SYSLINUXDIR = "/"
26# The kernel has an internal default console, which you can override with
27# a console=...some_tty...
28SYSLINUX_DEFAULT_CONSOLE ?= ""
29SYSLINUX_SERIAL ?= "0 115200"
30SYSLINUX_SERIAL_TTY ?= "ttyS0,115200"
31ISO_BOOTIMG = "isolinux/isolinux.bin"
32ISO_BOOTCAT = "isolinux/boot.cat"
33MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
34APPEND_prepend = " ${SYSLINUX_ROOT} "
35
36syslinux_populate() {
37 DEST=$1
38 BOOTDIR=$2
39 CFGNAME=$3
40
41 install -d ${DEST}${BOOTDIR}
42
43 # Install the config files
44 install -m 0644 ${SYSLINUXCFG} ${DEST}${BOOTDIR}/${CFGNAME}
45 if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
46 install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
47 install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
48 install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
49 if [ "${SYSLINUX_SPLASH}" != "" ] ; then
50 install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
51 fi
52 fi
53}
54
55syslinux_iso_populate() {
56 iso_dir=$1
57 syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
58 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
59 install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
60}
61
62syslinux_hddimg_populate() {
63 hdd_dir=$1
64 syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
65 install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
66}
67
68syslinux_hddimg_install() {
69 syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
70}
71
72syslinux_hdddirect_install() {
73 DEST=$1
74 syslinux $DEST
75}
76
77python build_syslinux_cfg () {
78 import copy
79 import sys
80
81 workdir = d.getVar('WORKDIR', True)
82 if not workdir:
83 bb.error("WORKDIR not defined, unable to package")
84 return
85
86 labels = d.getVar('LABELS', True)
87 if not labels:
88 bb.debug(1, "LABELS not defined, nothing to do")
89 return
90
91	    if not labels.split():
92 bb.debug(1, "No labels, nothing to do")
93 return
94
95 cfile = d.getVar('SYSLINUXCFG', True)
96 if not cfile:
97 raise bb.build.FuncFailed('Unable to read SYSLINUXCFG')
98
99 try:
100 cfgfile = file(cfile, 'w')
101	    except IOError:
102	        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
103
104 cfgfile.write('# Automatically created by OE\n')
105
106 opts = d.getVar('SYSLINUX_OPTS', True)
107
108 if opts:
109 for opt in opts.split(';'):
110 cfgfile.write('%s\n' % opt)
111
112	    cfgfile.write('ALLOWOPTIONS 1\n')
113 syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
114 syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
115 syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
116 if syslinux_serial:
117 cfgfile.write('SERIAL %s\n' % syslinux_serial)
118
119 menu = d.getVar('AUTO_SYSLINUXMENU', True)
120
121 if menu and syslinux_serial:
122 cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
123 else:
124 cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
125
126 timeout = d.getVar('SYSLINUX_TIMEOUT', True)
127
128 if timeout:
129 cfgfile.write('TIMEOUT %s\n' % timeout)
130 else:
131 cfgfile.write('TIMEOUT 50\n')
132
133 prompt = d.getVar('SYSLINUX_PROMPT', True)
134 if prompt:
135 cfgfile.write('PROMPT %s\n' % prompt)
136 else:
137 cfgfile.write('PROMPT 1\n')
138
139 if menu:
140 cfgfile.write('ui vesamenu.c32\n')
141 cfgfile.write('menu title Select kernel options and boot kernel\n')
142 cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
143 splash = d.getVar('SYSLINUX_SPLASH', True)
144 if splash:
145 cfgfile.write('menu background splash.lss\n')
146
147 for label in labels.split():
148 localdata = bb.data.createCopy(d)
149
150 overrides = localdata.getVar('OVERRIDES', True)
151 if not overrides:
152 raise bb.build.FuncFailed('OVERRIDES not defined')
153
154 localdata.setVar('OVERRIDES', label + ':' + overrides)
155 bb.data.update_data(localdata)
156
157 btypes = [ [ "", syslinux_default_console ] ]
158 if menu and syslinux_serial:
159 btypes = [ [ "Graphics console ", syslinux_default_console ],
160 [ "Serial console ", syslinux_serial_tty ] ]
161
162 for btype in btypes:
163 cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
164
165 append = localdata.getVar('APPEND', True)
166 initrd = localdata.getVar('INITRD', True)
167
168 if append:
169 cfgfile.write('APPEND ')
170
171 if initrd:
172 cfgfile.write('initrd=/initrd ')
173
174	                cfgfile.write('LABEL=%s ' % (label))
175
176 cfgfile.write('%s %s\n' % (append, btype[1]))
177 else:
178 cfgfile.write('APPEND %s\n' % btype[1])
179
180 cfgfile.close()
181}
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
new file mode 100644
index 0000000000..3700b2eee3
--- /dev/null
+++ b/meta/classes/systemd.bbclass
@@ -0,0 +1,193 @@
1# The list of packages that should have systemd packaging scripts added. For
2# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
3# files in this package. If this variable isn't set, [package].service is used.
4SYSTEMD_PACKAGES ?= "${PN}"
5SYSTEMD_PACKAGES_class-native ?= ""
6SYSTEMD_PACKAGES_class-nativesdk ?= ""
7
8# Whether to enable or disable the services on installation.
9SYSTEMD_AUTO_ENABLE ??= "enable"
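# For example, a hypothetical recipe shipping one unit in its main package
# and a socket unit in a split package might set (names illustrative only):
#
#   SYSTEMD_PACKAGES = "${PN} ${PN}-extra"
#   SYSTEMD_SERVICE_${PN} = "foo.service"
#   SYSTEMD_SERVICE_${PN}-extra = "foo-extra.socket"
#   SYSTEMD_AUTO_ENABLE = "disable"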
10
11# This class will be included in any recipe that supports systemd init scripts,
12# even if the systemd DISTRO_FEATURE isn't enabled. As such, don't make any
13# changes directly; check DISTRO_FEATURES first.
14python __anonymous() {
15 features = d.getVar("DISTRO_FEATURES", True).split()
16	    # If the distro features have systemd but not sysvinit, inhibit update-rc.d
17 # from doing any work so that pure-systemd images don't have redundant init
18 # files.
19 if "systemd" in features:
20 d.appendVar("DEPENDS", " systemd-systemctl-native")
21 if "sysvinit" not in features:
22 d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
23}
24
25systemd_postinst() {
26OPTS=""
27
28if [ -n "$D" ]; then
29 OPTS="--root=$D"
30fi
31
32if type systemctl >/dev/null 2>/dev/null; then
33 systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
34
35 if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
36 systemctl restart ${SYSTEMD_SERVICE}
37 fi
38fi
39}
40
41systemd_prerm() {
42if type systemctl >/dev/null 2>/dev/null; then
43 if [ -z "$D" ]; then
44 systemctl stop ${SYSTEMD_SERVICE}
45 fi
46
47 systemctl disable ${SYSTEMD_SERVICE}
48fi
49}
50
51
52systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
53
54python systemd_populate_packages() {
55 if "systemd" not in d.getVar("DISTRO_FEATURES", True).split():
56 return
57
58 def get_package_var(d, var, pkg):
59 val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip()
60 if val == "":
61 val = (d.getVar(var, True) or "").strip()
62 return val
63
64	    # Check if the systemd packages are already listed in PACKAGES
65 def systemd_check_package(pkg_systemd):
66 packages = d.getVar('PACKAGES', True)
67	        if pkg_systemd not in packages.split():
68 bb.error('%s does not appear in package list, please add it' % pkg_systemd)
69
70
71 def systemd_generate_package_scripts(pkg):
72 bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
73
74 # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
75 # variable.
76 localdata = d.createCopy()
77 localdata.prependVar("OVERRIDES", pkg + ":")
78 bb.data.update_data(localdata)
79
80 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
81 if not postinst:
82 postinst = '#!/bin/sh\n'
83 postinst += localdata.getVar('systemd_postinst', True)
84 d.setVar('pkg_postinst_%s' % pkg, postinst)
85
86 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
87 if not prerm:
88 prerm = '#!/bin/sh\n'
89 prerm += localdata.getVar('systemd_prerm', True)
90 d.setVar('pkg_prerm_%s' % pkg, prerm)
91
92
93 # Add files to FILES_*-systemd if existent and not already done
94 def systemd_append_file(pkg_systemd, file_append):
95 appended = False
96 if os.path.exists(oe.path.join(d.getVar("D", True), file_append)):
97 var_name = "FILES_" + pkg_systemd
98 files = d.getVar(var_name, False) or ""
99 if file_append not in files.split():
100 d.appendVar(var_name, " " + file_append)
101 appended = True
102 return appended
103
104	    # Add systemd files to FILES_*-systemd, parse for Also= and follow the references recursively
105 def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
106 # avoid infinite recursion
107 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
108 fullpath = oe.path.join(d.getVar("D", True), path, service)
109 if service.find('.service') != -1:
110 # for *.service add *@.service
111 service_base = service.replace('.service', '')
112 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
113 if service.find('.socket') != -1:
114 # for *.socket add *.service and *@.service
115 service_base = service.replace('.socket', '')
116 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
117 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
118 for key in keys.split():
119 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
120 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key)
121 pipe = os.popen(cmd, 'r')
122 line = pipe.readline()
123 while line:
124 line = line.replace('\n', '')
125 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
126 line = pipe.readline()
127 pipe.close()
128
129 # Check service-files and call systemd_add_files_and_parse for each entry
130 def systemd_check_services():
131 searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),]
132 searchpaths.append(oe.path.join(d.getVar("nonarch_base_libdir", True), "systemd", "system"))
133 searchpaths.append(oe.path.join(d.getVar("exec_prefix", True), d.getVar("nonarch_base_libdir", True), "systemd", "system"))
134 systemd_packages = d.getVar('SYSTEMD_PACKAGES', True)
135 has_exactly_one_service = len(systemd_packages.split()) == 1
136 if has_exactly_one_service:
137 has_exactly_one_service = len(get_package_var(d, 'SYSTEMD_SERVICE', systemd_packages).split()) == 1
138
139 keys = 'Also' # Conflicts??
140 if has_exactly_one_service:
141	        # a single service also gets the /dev/null dummies
142 keys = 'Also Conflicts'
143 # scan for all in SYSTEMD_SERVICE[]
144 for pkg_systemd in systemd_packages.split():
145 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
146 path_found = ''
147 for path in searchpaths:
148 if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
149 path_found = path
150 break
151 if path_found != '':
152 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
153 else:
154 raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \
155 (pkg_systemd, service))
156
157 # Run all modifications once when creating package
158 if os.path.exists(d.getVar("D", True)):
159 for pkg in d.getVar('SYSTEMD_PACKAGES', True).split():
160 systemd_check_package(pkg)
161 if d.getVar('SYSTEMD_SERVICE_' + pkg, True):
162 systemd_generate_package_scripts(pkg)
163 systemd_check_services()
164}
165
166PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
167
168python rm_systemd_unitdir (){
169 import shutil
170 if "systemd" not in d.getVar("DISTRO_FEATURES", True).split():
171 systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True))
172 if os.path.exists(systemd_unitdir):
173 shutil.rmtree(systemd_unitdir)
174 systemd_libdir = os.path.dirname(systemd_unitdir)
175 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
176 os.rmdir(systemd_libdir)
177}
178do_install[postfuncs] += "rm_systemd_unitdir "
179
180python rm_sysvinit_initddir (){
181 import shutil
182 sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d"))
183
184 if ("systemd" in d.getVar("DISTRO_FEATURES", True).split() and
185 "sysvinit" not in d.getVar("DISTRO_FEATURES", True).split() and
186 os.path.exists(sysv_initddir)):
187 systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True), "system")
188
189 # If systemd_unitdir contains anything, delete sysv_initddir
190 if (os.path.exists(systemd_unitdir) and os.listdir(systemd_unitdir)):
191 shutil.rmtree(sysv_initddir)
192}
193do_install[postfuncs] += "rm_sysvinit_initddir "
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
new file mode 100644
index 0000000000..efbc4eb9ae
--- /dev/null
+++ b/meta/classes/terminal.bbclass
@@ -0,0 +1,88 @@
1OE_TERMINAL ?= 'auto'
2OE_TERMINAL[type] = 'choice'
3OE_TERMINAL[choices] = 'auto none \
4 ${@" ".join(o.name \
5 for o in oe.terminal.prioritized())}'
6
7OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
8OE_TERMINAL_EXPORTS[type] = 'list'
9
10XAUTHORITY ?= "${HOME}/.Xauthority"
11SHELL ?= "bash"
12
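# As a usage sketch, a user who prefers a specific terminal can pin it in
# local.conf rather than relying on 'auto' probing; the value must be one of
# the choices oe.terminal reports, e.g.:
#
#   OE_TERMINAL = "screen"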
13
14def emit_terminal_func(command, envdata, d):
15 cmd_func = 'do_terminal'
16
17 envdata.setVar(cmd_func, 'exec ' + command)
18 envdata.setVarFlag(cmd_func, 'func', 1)
19
20 runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
21 runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
22 runfile = os.path.join(d.getVar('T', True), runfile)
23 bb.utils.mkdirhier(os.path.dirname(runfile))
24
25 with open(runfile, 'w') as script:
26 script.write('#!/bin/sh -e\n')
27 bb.data.emit_func(cmd_func, script, envdata)
28 script.write(cmd_func)
29 script.write("\n")
30 os.chmod(runfile, 0755)
31
32 return runfile
33
34def oe_terminal(command, title, d):
35 import oe.data
36 import oe.terminal
37
38 envdata = bb.data.init()
39
40 for v in os.environ:
41 envdata.setVar(v, os.environ[v])
42 envdata.setVarFlag(v, 'export', 1)
43
44 for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
45 value = d.getVar(export, True)
46 if value is not None:
47 os.environ[export] = str(value)
48 envdata.setVar(export, str(value))
49 envdata.setVarFlag(export, 'export', 1)
50 if export == "PSEUDO_DISABLED":
51 if "PSEUDO_UNLOAD" in os.environ:
52 del os.environ["PSEUDO_UNLOAD"]
53 envdata.delVar("PSEUDO_UNLOAD")
54
55 # Add in all variables from the user's original environment which
56	# haven't subsequently been set/changed
57 origbbenv = d.getVar("BB_ORIGENV", False) or {}
58 for key in origbbenv:
59 if key in envdata:
60 continue
61 value = origbbenv.getVar(key, True)
62 if value is not None:
63 os.environ[key] = str(value)
64 envdata.setVar(key, str(value))
65 envdata.setVarFlag(key, 'export', 1)
66
67 # Replace command with an executable wrapper script
68 command = emit_terminal_func(command, envdata, d)
69
70 terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
71 if terminal == 'none':
72 bb.fatal('Devshell usage disabled with OE_TERMINAL')
73 elif terminal != 'auto':
74 try:
75 oe.terminal.spawn(terminal, command, title, None, d)
76 return
77 except oe.terminal.UnsupportedTerminal:
78 bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
79 terminal)
80 except oe.terminal.ExecutionError as exc:
81 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
82
83 try:
84 oe.terminal.spawn_preferred(command, title, None, d)
85 except oe.terminal.NoSupportedTerminals:
86 bb.fatal('No valid terminal found, unable to open devshell')
87 except oe.terminal.ExecutionError as exc:
88 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
diff --git a/meta/classes/testimage-auto.bbclass b/meta/classes/testimage-auto.bbclass
new file mode 100644
index 0000000000..3d0e28994a
--- /dev/null
+++ b/meta/classes/testimage-auto.bbclass
@@ -0,0 +1,23 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# Run tests automatically on an image after the image is constructed
7# (as opposed to testimage.bbclass alone where tests must be called
8# manually using bitbake -c testimage <image>).
9#
10# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to
11# inherit it since that will be done in image.bbclass when this variable
12# has been set.
13#
14# See testimage.bbclass for the test implementation.
15
16inherit testimage
17
18python do_testimage_auto() {
19 testimage_main(d)
20}
21addtask testimage_auto before do_build after do_rootfs
22do_testimage_auto[depends] += "qemu-native:do_populate_sysroot"
23do_testimage_auto[depends] += "qemu-helper-native:do_populate_sysroot"
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
new file mode 100644
index 0000000000..2f9c974554
--- /dev/null
+++ b/meta/classes/testimage.bbclass
@@ -0,0 +1,179 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# testimage.bbclass enables testing of qemu images using python unittests.
7# Most of the tests are commands run on the target image over ssh.
8# To use it, add testimage to the global INHERIT and build your target image with -c testimage
9# You can try it out like this:
10# - first build a qemu core-image-sato
11# - add INHERIT += "testimage" in local.conf
12# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
13
14# You can set (or append to) TEST_SUITES in local.conf to select the tests
15# which you want to run for your target.
16# The test names are the module names in meta/lib/oeqa/runtime.
17# Each name in TEST_SUITES represents a required test for the image (no skipping allowed).
18# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
19# Note that the order in TEST_SUITES is important (it's the order the tests run in) and it influences test dependencies.
20# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
21
22# TEST_LOG_DIR contains an ssh log (what command is running, its output and return codes) and a qemu boot log up to login
23# Booting is handled by this class, and it's not a test in itself.
24# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
25
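# A minimal local.conf sketch for a custom selection of tests (the suite
# names come from meta/lib/oeqa/runtime; the timeout value is just an example):
#
#   INHERIT += "testimage"
#   TEST_SUITES = "ping ssh dmesg"
#   TEST_QEMUBOOT_TIMEOUT = "1500"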
26TEST_LOG_DIR ?= "${WORKDIR}/testimage"
27
28DEFAULT_TEST_SUITES = "ping auto"
29DEFAULT_TEST_SUITES_pn-core-image-minimal = "ping"
30DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date rpm smart dmesg"
31DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc rpm smart dmesg"
32
33TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
34
35TEST_QEMUBOOT_TIMEOUT ?= "1000"
36
37python do_testimage() {
38 testimage_main(d)
39}
40addtask testimage
41do_testimage[nostamp] = "1"
42do_testimage[depends] += "qemu-native:do_populate_sysroot"
43do_testimage[depends] += "qemu-helper-native:do_populate_sysroot"
44
45
46def get_tests_list(d):
47 testsuites = d.getVar("TEST_SUITES", True).split()
48 bbpath = d.getVar("BBPATH", True).split(':')
49
50 # This relies on lib/ under each directory in BBPATH being added to sys.path
51 # (as done by default in base.bbclass)
52 testslist = []
53 for testname in testsuites:
54 if testname != "auto":
55 found = False
56 for p in bbpath:
57 if os.path.exists(os.path.join(p, 'lib', 'oeqa', 'runtime', testname + '.py')):
58 testslist.append("oeqa.runtime." + testname)
59 found = True
60 break
61 if not found:
62 bb.error('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
63
64 if "auto" in testsuites:
65 def add_auto_list(path):
66 if not os.path.exists(os.path.join(path, '__init__.py')):
67 bb.fatal('Tests directory %s exists but is missing __init__.py' % path)
68 files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
69 for f in files:
70 module = 'oeqa.runtime.' + f[:-3]
71 if module not in testslist:
72 testslist.append(module)
73
74 for p in bbpath:
75 testpath = os.path.join(p, 'lib', 'oeqa', 'runtime')
76 bb.debug(2, 'Searching for tests in %s' % testpath)
77 if os.path.exists(testpath):
78 add_auto_list(testpath)
79
80 return testslist
81
82def testimage_main(d):
83 import unittest
84 import os
85 import oeqa.runtime
86 import re
87 import shutil
88 import time
89 from oeqa.oetest import runTests
90 from oeqa.utils.sshcontrol import SSHControl
91 from oeqa.utils.qemurunner import QemuRunner
92
93 testdir = d.getVar("TEST_LOG_DIR", True)
94 bb.utils.mkdirhier(testdir)
95
96 # tests in TEST_SUITES become required tests
97	    # they won't be skipped even if they aren't suitable for an image (like xorg for minimal)
98 # testslist is what we'll actually pass to the unittest loader
99 testslist = get_tests_list(d)
100 testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"]
101
102 class TestContext:
103 def __init__(self):
104 self.d = d
105 self.testslist = testslist
106 self.testsrequired = testsrequired
107 self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
108
109 # test context
110 tc = TestContext()
111
112 # prepare qemu instance
113 # and boot each supported fs type
114	    machine = d.getVar("MACHINE", True)
115	    # will handle fs type eventually, stick with ext3 for now
116	    # make a copy of the original rootfs and use that for tests
117	    origrootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + '.ext3')
118	    testrootfs = os.path.join(testdir, d.getVar("IMAGE_LINK_NAME", True) + '-testimage.ext3')
119 try:
120 shutil.copyfile(origrootfs, testrootfs)
121 except Exception as e:
122 bb.fatal("Error copying rootfs: %s" % e)
123
124 try:
125 boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT", True))
126 except ValueError:
127 boottime = 1000
128
129 qemu = QemuRunner(machine=machine, rootfs=testrootfs,
130 tmpdir = d.getVar("TMPDIR", True),
131 deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True),
132 display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True),
133 logfile = os.path.join(testdir, "qemu_boot_log.%s" % d.getVar('DATETIME', True)),
134 boottime = boottime)
135
136 qemuloglink = os.path.join(testdir, "qemu_boot_log")
137 if os.path.islink(qemuloglink):
138 os.unlink(qemuloglink)
139 os.symlink(qemu.logfile, qemuloglink)
140
141 sshlog = os.path.join(testdir, "ssh_target_log.%s" % d.getVar('DATETIME', True))
142 sshloglink = os.path.join(testdir, "ssh_target_log")
143 if os.path.islink(sshloglink):
144 os.unlink(sshloglink)
145 os.symlink(sshlog, sshloglink)
146
147 bb.note("DISPLAY value: %s" % qemu.display)
148 bb.note("rootfs file: %s" % qemu.rootfs)
149 bb.note("Qemu log file: %s" % qemu.logfile)
150 bb.note("SSH log file: %s" % sshlog)
151
152 pn = d.getVar("PN", True)
153 #catch exceptions when loading or running tests (mostly our own errors)
154 try:
155 if qemu.launch():
156
157 # set more context - ssh instance and qemu
158 # we do these here because we needed qemu to boot and get the ip
159 tc.qemu = qemu
160 tc.target = SSHControl(host=qemu.ip,logfile=sshlog)
161 # run tests and get the results
162 starttime = time.time()
163 result = runTests(tc)
164 stoptime = time.time()
165 if result.wasSuccessful():
166 bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
167 msg = "%s - OK - All required tests passed" % pn
168 skipped = len(result.skipped)
169 if skipped:
170 msg += " (skipped=%d)" % skipped
171 bb.plain(msg)
172 else:
173 raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
174 else:
175 raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % pn)
176 finally:
177 qemu.kill()
178
179testimage_main[vardepsexclude] += "BB_ORIGENV"
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
new file mode 100644
index 0000000000..6984efd1be
--- /dev/null
+++ b/meta/classes/tinderclient.bbclass
@@ -0,0 +1,368 @@
1def tinder_http_post(server, selector, content_type, body):
2 import httplib
3 # now post it
4 for i in range(0,5):
5 try:
6 h = httplib.HTTP(server)
7 h.putrequest('POST', selector)
8 h.putheader('content-type', content_type)
9 h.putheader('content-length', str(len(body)))
10 h.endheaders()
11 h.send(body)
12 errcode, errmsg, headers = h.getreply()
13 #print errcode, errmsg, headers
14 return (errcode,errmsg, headers, h.file)
15 except:
16 print "Error sending the report!"
17 # try again
18 pass
19
20 # return some garbage
21 return (-1, "unknown", "unknown", None)
22
23def tinder_form_data(bound, dict, log):
24 output = []
25 # for each key in the dictionary
26 for name in dict:
27 assert dict[name]
28 output.append( "--" + bound )
29 output.append( 'Content-Disposition: form-data; name="%s"' % name )
30 output.append( "" )
31 output.append( dict[name] )
32 if log:
33 output.append( "--" + bound )
34 output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
35 output.append( '' )
36 output.append( log )
37 output.append( '--' + bound + '--' )
38 output.append( '' )
39
40 return "\r\n".join(output)
41
42def tinder_time_string():
43 """
44 Return the time as GMT
45 """
46 return ""
47
48def tinder_format_http_post(d,status,log):
49 """
50 Format the Tinderbox HTTP post with the data needed
51 for the tinderbox to be happy.
52 """
53
54 import random
55
56 # the variables we will need to send on this form post
57 variables = {
58 "tree" : d.getVar('TINDER_TREE', True),
59 "machine_name" : d.getVar('TINDER_MACHINE', True),
60 "os" : os.uname()[0],
61 "os_version" : os.uname()[2],
62 "compiler" : "gcc",
63 "clobber" : d.getVar('TINDER_CLOBBER', True) or "0",
64 "srcdate" : d.getVar('SRCDATE', True),
65 "PN" : d.getVar('PN', True),
66 "PV" : d.getVar('PV', True),
67 "PR" : d.getVar('PR', True),
68 "FILE" : d.getVar('FILE', True) or "N/A",
69 "TARGETARCH" : d.getVar('TARGET_ARCH', True),
70 "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown",
71 "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown",
72 "MACHINE" : d.getVar('MACHINE', True) or "Unknown",
73 "DISTRO" : d.getVar('DISTRO', True) or "Unknown",
74 "zecke-rocks" : "sure",
75 }
76
77 # optionally add the status
78 if status:
79 variables["status"] = str(status)
80
81 # try to load the machine id
82	    # we only need it for build_status.pl, but it does
83	    # not hurt to always send it
84 try:
85 f = file(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
86 id = f.read()
87 variables['machine_id'] = id
88 except:
89 pass
90
91 # the boundary we will need
92 boundary = "----------------------------------%d" % int(random.random()*1000000000000)
93
94 # now format the body
95 body = tinder_form_data( boundary, variables, log )
96
97 return ("multipart/form-data; boundary=%s" % boundary),body
98
99
100def tinder_build_start(d):
101 """
102 Inform the tinderbox that a build is starting. We do this
103 by posting our name and tree to the build_start.pl script
104 on the server.
105 """
106
107 # get the body and type
108 content_type, body = tinder_format_http_post(d,None,None)
109 server = d.getVar('TINDER_HOST', True )
110 url = d.getVar('TINDER_URL', True )
111
112 selector = url + "/xml/build_start.pl"
113
114 #print "selector %s and url %s" % (selector, url)
115
116 # now post it
117 errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
118 #print errcode, errmsg, headers
119 report = h_file.read()
120
121 # now let us find the machine id that was assigned to us
122 search = "<machine id='"
123 report = report[report.find(search)+len(search):]
124 report = report[0:report.find("'")]
125
126 bb.note("Machine ID assigned by tinderbox: %s" % report )
127
128 # now we will need to save the machine number
129 # we will override any previous numbers
130 f = file(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
131 f.write(report)
132
133
134def tinder_send_http(d, status, _log):
135 """
136 Send this log as build status
137 """
138
139 # get the body and type
140 server = d.getVar('TINDER_HOST', True)
141 url = d.getVar('TINDER_URL', True)
142
143 selector = url + "/xml/build_status.pl"
144
145	    # now post it - in chunks of 18,000 characters
146 new_log = _log
147 while len(new_log) > 0:
148 content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
149 errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
150 #print errcode, errmsg, headers
151 #print h.file.read()
152 new_log = new_log[18000:]
153
154
155def tinder_print_info(d):
156 """
157 Print the TinderBox Info
158 Including informations of the BaseSystem and the Tree
159 we use.
160 """
161
162 # get the local vars
163 time = tinder_time_string()
164 ops = os.uname()[0]
165 version = os.uname()[2]
166 url = d.getVar( 'TINDER_URL' , True )
167 tree = d.getVar( 'TINDER_TREE', True )
168 branch = d.getVar( 'TINDER_BRANCH', True )
169 srcdate = d.getVar( 'SRCDATE', True )
170 machine = d.getVar( 'MACHINE', True )
171 distro = d.getVar( 'DISTRO', True )
172 bbfiles = d.getVar( 'BBFILES', True )
173 tarch = d.getVar( 'TARGET_ARCH', True )
174 fpu = d.getVar( 'TARGET_FPU', True )
175 oerev = d.getVar( 'OE_REVISION', True ) or "unknown"
176
177	    # there is a bug with triple quoted strings;
178	    # I will work around it here but will fix the
179	    # original bug as well
180 output = []
181 output.append("== Tinderbox Info" )
182 output.append("Time: %(time)s" )
183 output.append("OS: %(ops)s" )
184 output.append("%(version)s" )
185 output.append("Compiler: gcc" )
186 output.append("Tinderbox Client: 0.1" )
187 output.append("Tinderbox Client Last Modified: yesterday" )
188 output.append("Tinderbox Protocol: 0.1" )
189 output.append("URL: %(url)s" )
190 output.append("Tree: %(tree)s" )
191 output.append("Config:" )
192 output.append("branch = '%(branch)s'" )
193 output.append("TARGET_ARCH = '%(tarch)s'" )
194 output.append("TARGET_FPU = '%(fpu)s'" )
195 output.append("SRCDATE = '%(srcdate)s'" )
196 output.append("MACHINE = '%(machine)s'" )
197 output.append("DISTRO = '%(distro)s'" )
198 output.append("BBFILES = '%(bbfiles)s'" )
199 output.append("OEREV = '%(oerev)s'" )
200 output.append("== End Tinderbox Client Info" )
201
202 # now create the real output
203 return "\n".join(output) % vars()
204
205
206def tinder_print_env():
207 """
208 Print the environment variables of this build
209 """
210 time_start = tinder_time_string()
211 time_end = tinder_time_string()
212
213 # build the environment
214 env = ""
215 for var in os.environ:
216 env += "%s=%s\n" % (var, os.environ[var])
217
218 output = []
219 output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
220 output.append( env )
221 output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
222
223 return "\n".join(output) % vars()
224
225def tinder_tinder_start(d, event):
226 """
227	    Print the configuration of this build
228 """
229
230 time_start = tinder_time_string()
231 config = tinder_print_info(d)
232 #env = tinder_print_env()
233 time_end = tinder_time_string()
234 packages = " ".join( event.getPkgs() )
235
236 output = []
237 output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
238 output.append( config )
239 #output.append( env )
240 output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
241 output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
242 output.append( "<--- TINDERBOX STARTING BUILD NOW" )
243
244 output.append( "" )
245
246 return "\n".join(output) % vars()
247
248def tinder_do_tinder_report(event):
249 """
250 Report to the tinderbox:
251 On the BuildStart we will inform the box directly
252 On the other events we will write to the TINDER_LOG and
253 when the Task is finished we will send the report.
254
255 The above is not yet fully implemented. Currently we send
256 information immediately. The caching/queuing needs to be
257 implemented. Also sending more or less information is not
258 implemented yet.
259
260 We have two temporary files stored in the TMP directory. One file
261	    contains the machine id assigned to the tinderclient; this id gets
262	    assigned when we connect to the box and start the build process. The
263	    second file is used to work around an EventHandler limitation. If BitBake
264	    is run with the continue option we want the build to fail even if we get
265	    the BuildCompleted event. In this case we have to look up the status and
266	    send it instead of 100/success.
267 """
268 import glob
269
270 # variables
271 name = bb.event.getName(event)
272 log = ""
273 status = 1
274	    # Check what we need to do; Build* shows we start or are done
275 if name == "BuildStarted":
276 tinder_build_start(event.data)
277 log = tinder_tinder_start(event.data,event)
278
279 try:
280 # truncate the tinder log file
281 f = file(event.data.getVar('TINDER_LOG', True), 'w')
282 f.write("")
283 f.close()
284 except:
285 pass
286
287 try:
288 # write a status to the file. This is needed for the -k option
289 # of BitBake
290 g = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
291 g.write("")
292 g.close()
293 except IOError:
294 pass
295
296 # Append the Task-Log (compile,configure...) to the log file
297 # we will send to the server
298 if name == "TaskSucceeded" or name == "TaskFailed":
299 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
300
301 if len(log_file) != 0:
302 to_file = event.data.getVar('TINDER_LOG', True)
303 log += "".join(open(log_file[0], 'r').readlines())
304
305 # set the right 'HEADER'/Summary for the TinderBox
306 if name == "TaskStarted":
307 log += "---> TINDERBOX Task %s started\n" % event.task
308 elif name == "TaskSucceeded":
309 log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
310 elif name == "TaskFailed":
311 log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
312 elif name == "PkgStarted":
313 log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True)
314 elif name == "PkgSucceeded":
315 log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True)
316 elif name == "PkgFailed":
317	        if event.data.getVar('TINDER_AUTOBUILD', True) != "0":
318	            bb.build.exec_task('do_clean', event.data)
319 log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
320 status = 200
321 # remember the failure for the -k case
322 h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
323 h.write("200")
324 elif name == "BuildCompleted":
325 log += "Build Completed\n"
326 status = 100
327	        # Check if we have an old status...
328 try:
329 h = file(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
330 status = int(h.read())
331 except:
332 pass
333
334 elif name == "MultipleProviders":
335 log += "---> TINDERBOX Multiple Providers\n"
336 log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
337 log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
338 log += "is runtime: %d\n" % event.isRuntime()
339 log += "<--- TINDERBOX Multiple Providers\n"
340 elif name == "NoProvider":
341 log += "Error: No Provider for: %s\n" % event.getItem()
342 log += "Error:Was Runtime: %d\n" % event.isRuntime()
343 status = 200
344 # remember the failure for the -k case
345 h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
346 h.write("200")
347
348 # now post the log
349 if len(log) == 0:
350 return
351
352 # for now we will use the http post method as it is the only one
353 log_post_method = tinder_send_http
354 log_post_method(event.data, status, log)
355
356
357# we want to be an event handler
358addhandler tinderclient_eventhandler
359python tinderclient_eventhandler() {
360 if e.data is None or bb.event.getName(e) == "MsgNote":
361 return
362
363 do_tinder_report = e.data.getVar('TINDER_REPORT', True)
364 if do_tinder_report and do_tinder_report == "1":
365 tinder_do_tinder_report(e)
366
367 return
368}
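
# Enabling the client is purely a matter of configuration; a hedged sketch
# (the host, tree and machine values below are placeholders, not real servers):
#
#   INHERIT += "tinderclient"
#   TINDER_REPORT = "1"
#   TINDER_HOST = "tinderbox.example.org"
#   TINDER_URL = "/tinderbox"
#   TINDER_TREE = "my-tree"
#   TINDER_MACHINE = "builder-1"
#   TINDER_LOG = "${TMPDIR}/tinder.log"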
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
new file mode 100644
index 0000000000..614f1c0ffd
--- /dev/null
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -0,0 +1,188 @@
1inherit siteinfo kernel-arch
2
3# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
4# doesn't always match our expectations... but we default to the stock value
5REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
6
7# This function creates an environment-setup-script for use in a deployable SDK
8toolchain_create_sdk_env_script () {
9 # Create environment setup script
10 script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}}
11 rm -f $script
12 touch $script
13 echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}:$PATH' >> $script
14 echo 'export PKG_CONFIG_SYSROOT_DIR=${SDKTARGETSYSROOT}' >> $script
15 echo 'export PKG_CONFIG_PATH=${SDKTARGETSYSROOT}${libdir}/pkgconfig' >> $script
16 echo 'export CONFIG_SITE=${SDKPATH}/site-config-${REAL_MULTIMACH_TARGET_SYS}' >> $script
17 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
18 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
19 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
20 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
21 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
22 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
23 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
24 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
25 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
26 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
27 echo 'export AR=${TARGET_PREFIX}ar' >> $script
28 echo 'export NM=${TARGET_PREFIX}nm' >> $script
29 echo 'export M4=m4' >> $script
30 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
31 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=${SDKTARGETSYSROOT}"' >> $script
32 if [ "${TARGET_OS}" = "darwin8" ]; then
33 echo 'export TARGET_CFLAGS="-I${SDKTARGETSYSROOT}${includedir}"' >> $script
34 echo 'export TARGET_LDFLAGS="-L${SDKTARGETSYSROOT}${libdir}"' >> $script
35 # Workaround darwin toolchain sysroot path problems
36 cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
37 ln -s /usr/local local
38 fi
39 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
40 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
41 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
42 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
43 echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
44 echo 'export OECORE_TARGET_SYSROOT="${SDKTARGETSYSROOT}"' >> $script
45 echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
46 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
47 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
48 echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script
49 echo 'export ARCH=${ARCH}' >> $script
50 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
51}
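
# Once the SDK is installed, a user sources the generated script to set up a
# cross build; a hedged example session (the path depends on SDKPATH and the
# target triplet, so the one below is illustrative only):
#
#   $ . /opt/poky/1.5/environment-setup-armv5te-poky-linux-gnueabi
#   $ $CC -o hello hello.c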
52
53# This function creates an environment-setup-script in the TMPDIR which enables
54# an OE-core IDE to integrate with the build tree
55toolchain_create_tree_env_script () {
56 script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
57 rm -f $script
58 touch $script
59 echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
60 echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
61 echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
62
63 echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
64
65 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
66 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
67 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
68 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
69 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
70 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
71 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
72 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
73 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
74 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
75 echo 'export AR=${TARGET_PREFIX}ar' >> $script
76 echo 'export NM=${TARGET_PREFIX}nm' >> $script
77 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
78 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${BUILD_SYS} --with-libtool-sysroot=${STAGING_DIR_TARGET}"' >> $script
79 if [ "${TARGET_OS}" = "darwin8" ]; then
80 echo 'export TARGET_CFLAGS="-I${STAGING_DIR}${MACHINE}${includedir}"' >> $script
81 echo 'export TARGET_LDFLAGS="-L${STAGING_DIR}${MACHINE}${libdir}"' >> $script
82 # Workaround darwin toolchain sysroot path problems
83 cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
84 ln -s /usr/local local
85 fi
86 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
87 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
88 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
89 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
90 echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
91 echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
92 echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
93 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
94 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
95 echo 'export ARCH=${ARCH}' >> $script
96 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
97}
98
99# This function creates an environment-setup-script for use by the ADT installer
100toolchain_create_sdk_env_script_for_installer () {
101 # Create environment setup script
102 local multimach_target_sys=$1
103 script=${SDK_OUTPUT}/${SDKPATH}/environment-setup-${multimach_target_sys}
104 rm -f $script
105 touch $script
106 echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}:$PATH' >> $script
107 echo 'export PKG_CONFIG_SYSROOT_DIR=##SDKTARGETSYSROOT##' >> $script
108 echo 'export PKG_CONFIG_PATH=##SDKTARGETSYSROOT##${target_libdir}/pkgconfig' >> $script
109 echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
110 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
111 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
112 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
113 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
114 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
115 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
116 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
117 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
118 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
119 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
120 echo 'export AR=${TARGET_PREFIX}ar' >> $script
121 echo 'export NM=${TARGET_PREFIX}nm' >> $script
122 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
123 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=##SDKTARGETSYSROOT##"' >> $script
124 if [ "${TARGET_OS}" = "darwin8" ]; then
125 echo 'export TARGET_CFLAGS="-I##SDKTARGETSYSROOT##${target_includedir}"' >> $script
126	    echo 'export TARGET_LDFLAGS="-L##SDKTARGETSYSROOT##${target_libdir}"' >> $script
127 # Workaround darwin toolchain sysroot path problems
128 cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
129 ln -s /usr/local local
130 fi
131 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
132 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
133 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
134 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
135 echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
136 echo 'export OECORE_TARGET_SYSROOT="##SDKTARGETSYSROOT##"' >> $script
137 echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
138 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
139 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
140 echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script
141 echo 'export ARCH=${ARCH}' >> $script
142 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
143}
144
145# We get the cached site config at runtime
146TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
147TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
148TOOLCHAIN_NEED_CONFIGSITE_CACHE = "${TCLIBC} ncurses"
149
150# This function creates a site config file
151toolchain_create_sdk_siteconfig () {
152 local siteconfig=$1
153
154 rm -f $siteconfig
155 touch $siteconfig
156
157 for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
158 cat $sitefile >> $siteconfig
159 done
160
161 #get cached site config
162 for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
163 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
164 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
165 fi
166 done
167}
168# The immediate expansion above can result in unwanted path dependencies here
169toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
170
171# This function creates a version information file
172toolchain_create_sdk_version () {
173 local versionfile=$1
174 rm -f $versionfile
175 touch $versionfile
176 echo 'Distro: ${DISTRO}' >> $versionfile
177 echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
178 echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
179 echo 'Timestamp: ${DATETIME}' >> $versionfile
180}
181toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
182
183python __anonymous () {
184 deps = ""
185 for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
186 deps += " %s:do_populate_sysroot" % dep
187 d.appendVarFlag('do_configure', 'depends', deps)
188}
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
new file mode 100644
index 0000000000..72da932232
--- /dev/null
+++ b/meta/classes/typecheck.bbclass
@@ -0,0 +1,12 @@
1# Check types of bitbake configuration variables
2#
3# See oe.types for details.
4
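# For instance, given an illustrative variable declared with a type flag:
#
#   MY_FEATURE_FLAG = "1"
#   MY_FEATURE_FLAG[type] = "boolean"
#
# the handler below validates the value when the configuration is parsed,
# so a value such as "maybe" fails at parse time rather than mid-build.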
5python check_types() {
6	    import oe.data, oe.types
7 for key in e.data.keys():
8 if e.data.getVarFlag(key, "type"):
9 oe.data.typed_value(key, e.data)
10}
11addhandler check_types
12check_types[eventmask] = "bb.event.ConfigParsed"
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
new file mode 100644
index 0000000000..5068f49bfc
--- /dev/null
+++ b/meta/classes/uboot-config.bbclass
@@ -0,0 +1,59 @@
1# Handle U-Boot config for a machine
2#
3# The format to specify it, in the machine, is:
4#
5# UBOOT_CONFIG ??= <default>
6# UBOOT_CONFIG[foo] = "config,images"
7#
8# or
9#
10# UBOOT_MACHINE = "config"
11#
12# Copyright 2013 (C) O.S. Systems Software LTDA.
13
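# A machine configuration sketch (the board and config names below are
# hypothetical):
#
#   UBOOT_CONFIG ??= "sd"
#   UBOOT_CONFIG[sd] = "myboard_sd_config,sdcard"
#   UBOOT_CONFIG[nand] = "myboard_nand_config"
#
# This selects myboard_sd_config as UBOOT_MACHINE and appends "sdcard" to
# IMAGE_FSTYPES.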
14python () {
15 ubootmachine = d.getVar("UBOOT_MACHINE", True)
16 ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
17
18 if not ubootmachine and not ubootconfigflags:
19 PN = d.getVar("PN", True)
20 FILE = os.path.basename(d.getVar("FILE", True))
21	        bb.debug(1, "To build %s, see %s for instructions on "
22	                    "setting up your machine config" % (PN, FILE))
23 raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE", True))
24
25 if ubootmachine and ubootconfigflags:
26 raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
27
28 if not ubootconfigflags:
29 return
30
31 ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
32 if len(ubootconfig) > 1:
33 raise bb.parse.SkipPackage('You can only have a single default for UBOOT_CONFIG.')
34 elif len(ubootconfig) == 0:
35 raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
36 ubootconfig = ubootconfig[0]
37
38 for f, v in ubootconfigflags.items():
39 if f == 'defaultval':
40 continue
41
42 items = v.split(',')
43 if items[0] and len(items) > 2:
44 raise bb.parse.SkipPackage('Only config,images can be specified!')
45
46 if ubootconfig == f:
47 bb.debug(1, "Setting UBOOT_MACHINE to %s." % items[0])
48 d.setVar('UBOOT_MACHINE', items[0])
49
50 # IMAGE_FSTYPES appending
51 if len(items) > 1 and items[1]:
52 bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
53 d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
54
55 # Go out as we found a match!
56 break
57 else:
58 raise ValueError("UBOOT_CONFIG %s is not supported" % ubootconfig)
59}
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
new file mode 100644
index 0000000000..f75f5b6115
--- /dev/null
+++ b/meta/classes/update-alternatives.bbclass
@@ -0,0 +1,267 @@
1# This class is used to help the alternatives system which is useful when
2# multiple sources provide same command. You can use update-alternatives
3# command directly in your recipe, but in most cases this class simplifies
4# that job.
5#
6# To use this class a number of variables should be defined:
7#
8# List all of the alternatives needed by a package:
9# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
10#
11# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
12#
13# The pathname of the link
14# ALTERNATIVE_LINK_NAME[name] = "target"
15#
16# This is the name of the binary once it has been installed on the target.
17# This name is global to all split packages in this recipe, and should match
18# other recipes with the same functionality.
19# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
20#
21# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
22#
23# The default link to create for all targets
24# ALTERNATIVE_TARGET = "target"
25#
26# This is useful in a multicall binary case
27# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
28#
29# A non-default link to create for a target
30# ALTERNATIVE_TARGET[name] = "target"
31#
32# This is the name of the binary as it has been installed by do_install
33# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
34#
35# A package specific link for a target
36# ALTERNATIVE_TARGET_<pkg>[name] = "target"
37#
38# This is useful when a recipe provides multiple alternatives for the
39# same item.
40#
41# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
42# from ALTERNATIVE_LINK_NAME.
43#
44# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
45# ALTERNATIVE_TARGET will have '.${BPN}' appended to it. If the file
46# referenced has not been renamed, it will also be renamed. (This avoids
47# the need to rename alternative files in the do_install step, but still
48# supports it if necessary for some reason.)
49#
50# The default priority for any alternatives
51# ALTERNATIVE_PRIORITY = "priority"
52#
53# i.e. default is ALTERNATIVE_PRIORITY = "10"
54#
55# The non-default priority for a specific target
56# ALTERNATIVE_PRIORITY[name] = "priority"
57#
58# The package priority for a specific target
59# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
60
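# Putting it together, a hedged sketch for a hypothetical multicall binary
# recipe (all names below are illustrative):
#
#   ALTERNATIVE_${PN} = "sh test"
#   ALTERNATIVE_TARGET = "/bin/mybox"
#   ALTERNATIVE_LINK_NAME[sh] = "/bin/sh"
#   ALTERNATIVE_LINK_NAME[test] = "/usr/bin/test"
#   ALTERNATIVE_PRIORITY = "50"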
61ALTERNATIVE_PRIORITY = "10"
62
63# We need special processing for vardeps because it cannot work on
64# modified flag values. So we aggregate the flags into a new variable
65# and include that variable in the set.
66UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
67
68def gen_updatealternativesvardeps(d):
69 pkgs = (d.getVar("PACKAGES", True) or "").split()
70 vars = (d.getVar("UPDALTVARS", True) or "").split()
71
72	    # First compute them for the non-package versions
73 for v in vars:
74 for flag in (d.getVarFlags(v) or {}):
75 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
76 continue
77 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
78
79 for p in pkgs:
80 for v in vars:
81 for flag in (d.getVarFlags("%s_%s" % (v,p)) or {}):
82 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
83 continue
84 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
85
86def ua_extend_depends(d):
87	    if 'virtual/update-alternatives' not in d.getVar('PROVIDES', True):
88 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
89
90python __anonymous() {
91 # Update Alternatives only works on target packages...
92 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or \
93 bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
94 bb.data.inherits_class('cross-canadian', d):
95 return
96
97 # compute special vardeps
98 gen_updatealternativesvardeps(d)
99
100 # extend the depends to include virtual/update-alternatives
101 ua_extend_depends(d)
102}
103
104def gen_updatealternativesvars(d):
105 ret = []
106 pkgs = (d.getVar("PACKAGES", True) or "").split()
107 vars = (d.getVar("UPDALTVARS", True) or "").split()
108
109 for v in vars:
110 ret.append(v + "_VARDEPS")
111
112 for p in pkgs:
113 for v in vars:
114 ret.append(v + "_" + p)
115 ret.append(v + "_VARDEPS_" + p)
116 return " ".join(ret)
117
118# Now the new stuff, we use a custom function to generate the right values
119populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
120
121# We need to do the rename after the image creation step, but before
122# the split and strip steps. packagecopy seems to be the earliest reasonable
123# place.
124python perform_packagecopy_append () {
125 # Check for deprecated usage...
126 pn = d.getVar('BPN', True)
127	    if d.getVar('ALTERNATIVE_LINKS', True) is not None:
128 bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
129
130 # Do actual update alternatives processing
131 pkgdest = d.getVar('PKGD', True)
132 for pkg in (d.getVar('PACKAGES', True) or "").split():
133 # If the src == dest, we know we need to rename the dest by appending ${BPN}
134 link_rename = {}
135 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
136 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
137 if not alt_link:
138 alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name)
139 d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
140
141 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
142 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
143 # Sometimes alt_target is specified as relative to the link name.
144 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
145
146 # If the link and target are the same name, we need to rename the target.
147 if alt_link == alt_target:
148 src = '%s/%s' % (pkgdest, alt_target)
149 alt_target_rename = '%s.%s' % (alt_target, pn)
150 dest = '%s/%s' % (pkgdest, alt_target_rename)
151 if os.path.lexists(dest):
152 bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
153 elif os.path.lexists(src):
154 if os.path.islink(src):
155 # Delay rename of links
156 link_rename[alt_target] = alt_target_rename
157 else:
158 bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
159 os.rename(src, dest)
160 else:
161 bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
162 continue
163 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
164
165 # Process delayed link names
166 # Do these after other renames so we can correct broken links
167 for alt_target in link_rename:
168 src = '%s/%s' % (pkgdest, alt_target)
169 dest = '%s/%s' % (pkgdest, link_rename[alt_target])
170 link = os.readlink(src)
171 link_target = oe.path.realpath(src, pkgdest, True)
172
173 if os.path.lexists(link_target):
174 # Ok, the link_target exists, we can rename
175 bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target]))
176 os.rename(src, dest)
177 else:
178 # Try to resolve the broken link to link.${BPN}
179 link_maybe = '%s.%s' % (os.readlink(src), pn)
180 if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
181 # Ok, the renamed link target exists.. create a new link, and remove the original
182 bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe))
183 os.symlink(link_maybe, dest)
184 os.unlink(src)
185 else:
186 bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
187}
188
189PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
190
191python populate_packages_updatealternatives () {
192 pn = d.getVar('BPN', True)
193
194 # Do actual update alternatives processing
195 pkgdest = d.getVar('PKGD', True)
196 for pkg in (d.getVar('PACKAGES', True) or "").split():
197 # Create post install/removal scripts
198 alt_setup_links = ""
199 alt_remove_links = ""
200 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
201 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
202 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
203 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
204 # Sometimes alt_target is specified as relative to the link name.
205 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
206
207 alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True)
208 alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True)
209
210 # This shouldn't trigger, as it should have been resolved earlier!
211 if alt_link == alt_target:
212 bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
213 alt_target = '%s.%s' % (alt_target, pn)
214
215 if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
216 bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
217 continue
218
219			# Default to generating a shell script; eventually we may want to change this...
220 alt_target = os.path.normpath(alt_target)
221
222 alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
223 alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
224
225 if alt_setup_links:
226 # RDEPENDS setup
227 provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True)
228 if provider:
229 #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
230 d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX') + provider)
231
232 bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg)
233 bb.note('%s' % alt_setup_links)
234 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
235 postinst += alt_setup_links
236 d.setVar('pkg_postinst_%s' % pkg, postinst)
237
238 bb.note('%s' % alt_remove_links)
239 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or '#!/bin/sh\n'
240 postrm += alt_remove_links
241 d.setVar('pkg_postrm_%s' % pkg, postrm)
242}
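# For illustration, with hypothetical values the generated postinst fragment
# looks like:
#   update-alternatives --install /usr/bin/vi vi /usr/bin/vi.busybox 100
# and the matching postrm fragment:
#   update-alternatives --remove vi /usr/bin/vi.busybox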
243
244python package_do_filedeps_append () {
245 pn = d.getVar('BPN', True)
246 pkgdest = d.getVar('PKGDEST', True)
247
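    # Note: 'packages' is the local defined by package_do_filedeps in
    # package.bbclass, to which this fragment is appended.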
248 for pkg in packages.split():
249 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
250 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
251 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
252 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
253
254 if alt_link == alt_target:
255 bb.warn('alt_link == alt_target: %s == %s' % (alt_link, alt_target))
256 alt_target = '%s.%s' % (alt_target, pn)
257
258 if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
259 continue
260
261 # Add file provide
262 trans_target = oe.package.file_translate(alt_target)
263 d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
264 if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""):
265 d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
266}
267
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
new file mode 100644
index 0000000000..f726f2f4b1
--- /dev/null
+++ b/meta/classes/update-rc.d.bbclass
@@ -0,0 +1,103 @@
1UPDATERCPN ?= "${PN}"
2
3DEPENDS_append = " update-rc.d-native"
4UPDATERCD = "update-rc.d"
5UPDATERCD_virtclass-cross = ""
6UPDATERCD_class-native = ""
7UPDATERCD_class-nativesdk = ""
8
9RRECOMMENDS_${UPDATERCPN}_append = " ${UPDATERCD}"
10
11INITSCRIPT_PARAMS ?= "defaults"
12
13INIT_D_DIR = "${sysconfdir}/init.d"
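
# A hypothetical recipe-side sketch (service name and runlevel parameters
# are illustrative, not defaults of this class):
#   INITSCRIPT_PACKAGES = "${PN}"
#   INITSCRIPT_NAME = "myservice"
#   INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ."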
14
15updatercd_postinst() {
16if test "x$D" != "x"; then
17 OPT="-r $D"
18else
19 OPT="-s"
20fi
21if type update-rc.d >/dev/null 2>/dev/null; then
22 update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
23fi
24}
25
26updatercd_prerm() {
27if test "x$D" = "x"; then
28 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
29fi
30}
31
32updatercd_postrm() {
33if test "$D" != ""; then
34 OPT="-f -r $D"
35else
36 OPT=""
37fi
38if type update-rc.d >/dev/null 2>/dev/null; then
39 update-rc.d $OPT ${INITSCRIPT_NAME} remove
40fi
41}
42
43
44def update_rc_after_parse(d):
45 if d.getVar('INITSCRIPT_PACKAGES') == None:
46 if d.getVar('INITSCRIPT_NAME') == None:
47 raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE'))
48 if d.getVar('INITSCRIPT_PARAMS') == None:
49 raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE'))
50
51python __anonymous() {
52 update_rc_after_parse(d)
53}
54
55PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd "
56
57populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
58
59python populate_packages_updatercd () {
60 def update_rcd_package(pkg):
61 bb.debug(1, 'adding update-rc.d calls to postinst/postrm for %s' % pkg)
62 """
63        update-rc.d postinst is appended here because pkg_postinst may need to
64        execute on the target. Not doing so may cause the update-rc.d postinst
65        to be invoked twice, causing unwanted warnings.
66 """
67
68 localdata = bb.data.createCopy(d)
69 overrides = localdata.getVar("OVERRIDES", True)
70 localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
71 bb.data.update_data(localdata)
72
73 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
74 if not postinst:
75 postinst = '#!/bin/sh\n'
76 postinst += localdata.getVar('updatercd_postinst', True)
77 d.setVar('pkg_postinst_%s' % pkg, postinst)
78
79 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
80 if not prerm:
81 prerm = '#!/bin/sh\n'
82 prerm += localdata.getVar('updatercd_prerm', True)
83 d.setVar('pkg_prerm_%s' % pkg, prerm)
84
85 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
86 if not postrm:
87 postrm = '#!/bin/sh\n'
88 postrm += localdata.getVar('updatercd_postrm', True)
89 d.setVar('pkg_postrm_%s' % pkg, postrm)
90
91 # Check that this class isn't being inhibited (generally, by
92 # systemd.bbclass) before doing any work.
93    if "sysvinit" in d.getVar("DISTRO_FEATURES", True).split() or \
94 not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
95 pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
96 if pkgs == None:
97 pkgs = d.getVar('UPDATERCPN', True)
98 packages = (d.getVar('PACKAGES', True) or "").split()
99 if not pkgs in packages and packages != []:
100 pkgs = packages[0]
101 for pkg in pkgs.split():
102 update_rcd_package(pkg)
103}
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
new file mode 100644
index 0000000000..a850e9db6a
--- /dev/null
+++ b/meta/classes/useradd.bbclass
@@ -0,0 +1,193 @@
1inherit useradd_base
2
3# base-passwd-cross provides the default passwd and group files in the
4# target sysroot, and shadow -native and -sysroot provide the utilities
5# and support files needed to add and modify user and group accounts
6DEPENDS_append = "${USERADDDEPENDS}"
7USERADDDEPENDS = " base-passwd shadow-native shadow-sysroot shadow"
8USERADDDEPENDS_virtclass-cross = ""
9USERADDDEPENDS_class-native = ""
10USERADDDEPENDS_class-nativesdk = ""
11
12# This preinstall function can be run in four different contexts:
13#
14# a) Before do_install
15# b) At do_populate_sysroot_setscene when installing from sstate packages
16# c) As the preinst script in the target package at do_rootfs time
17# d) As the preinst script in the target package on device as a package upgrade
18#
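# A hypothetical recipe-side configuration (all names illustrative) that this
# preinst acts on:
#
#   USERADD_PACKAGES = "${PN}"
#   GROUPADD_PARAM_${PN} = "--system mygroup"
#   USERADD_PARAM_${PN} = "--system --gid mygroup --shell /bin/false myuser"
#
# Multiple parameter sets may be chained with ';' inside the *_PARAM values.
#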
19useradd_preinst () {
20OPT=""
21SYSROOT=""
22
23if test "x$D" != "x"; then
24 # Installing into a sysroot
25 SYSROOT="$D"
26 OPT="--root $D"
27
28 # Add groups and users defined for all recipe packages
29 GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
30 USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
31 GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
32else
33 # Installing onto a target
34 # Add groups and users defined only for this package
35 GROUPADD_PARAM="${GROUPADD_PARAM}"
36 USERADD_PARAM="${USERADD_PARAM}"
37 GROUPMEMS_PARAM="${GROUPMEMS_PARAM}"
38fi
39
40# Perform group additions first, since user additions may depend
41# on these groups existing
42if test "x$GROUPADD_PARAM" != "x"; then
43 echo "Running groupadd commands..."
44 # Invoke multiple instances of groupadd for parameter lists
45 # separated by ';'
46 opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
47 remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
48 while test "x$opts" != "x"; do
49 perform_groupadd "$SYSROOT" "$OPT $opts" 10
50 if test "x$opts" = "x$remaining"; then
51 break
52 fi
53 opts=`echo "$remaining" | cut -d ';' -f 1`
54 remaining=`echo "$remaining" | cut -d ';' -f 2-`
55 done
56fi
57
58if test "x$USERADD_PARAM" != "x"; then
59 echo "Running useradd commands..."
60 # Invoke multiple instances of useradd for parameter lists
61 # separated by ';'
62 opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
63 remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
64 while test "x$opts" != "x"; do
65 perform_useradd "$SYSROOT" "$OPT $opts" 10
66 if test "x$opts" = "x$remaining"; then
67 break
68 fi
69 opts=`echo "$remaining" | cut -d ';' -f 1`
70 remaining=`echo "$remaining" | cut -d ';' -f 2-`
71 done
72fi
73
74if test "x$GROUPMEMS_PARAM" != "x"; then
75 echo "Running groupmems commands..."
76 # Invoke multiple instances of groupmems for parameter lists
77 # separated by ';'
78 opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
79 remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
80 while test "x$opts" != "x"; do
81 perform_groupmems "$SYSROOT" "$OPT $opts" 10
82 if test "x$opts" = "x$remaining"; then
83 break
84 fi
85 opts=`echo "$remaining" | cut -d ';' -f 1`
86 remaining=`echo "$remaining" | cut -d ';' -f 2-`
87 done
88fi
89}
90
91useradd_sysroot () {
92 # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
93 # at this point so we're explicit about the environment so pseudo can load if
94 # not already present.
95 export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
96
97 # Explicitly set $D since it isn't set to anything
98 # before do_install
99 D=${STAGING_DIR_TARGET}
100 useradd_preinst
101}
102
103useradd_sysroot_sstate () {
104 if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
105 then
106 useradd_sysroot
107 fi
108}
109
110do_install[prefuncs] += "${SYSROOTFUNC}"
111SYSROOTFUNC = "useradd_sysroot"
112SYSROOTFUNC_virtclass-cross = ""
113SYSROOTFUNC_class-native = ""
114SYSROOTFUNC_class-nativesdk = ""
115SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}"
116SYSROOTPOSTFUNC = "useradd_sysroot_sstate"
117SYSROOTPOSTFUNC_virtclass-cross = ""
118SYSROOTPOSTFUNC_class-native = ""
119SYSROOTPOSTFUNC_class-nativesdk = ""
120
121USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
122USERADDSETSCENEDEPS_virtclass-cross = ""
123USERADDSETSCENEDEPS_class-native = ""
124USERADDSETSCENEDEPS_class-nativesdk = ""
125do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
126do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
127
128# Recipe parse-time sanity checks
129def update_useradd_after_parse(d):
130 useradd_packages = d.getVar('USERADD_PACKAGES', True)
131
132 if not useradd_packages:
133 raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE'))
134
135 for pkg in useradd_packages.split():
136 if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
137 bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE'), pkg))
138
139python __anonymous() {
140 update_useradd_after_parse(d)
141}
142
143# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
144# [group|user]add parameters for all USERADD_PACKAGES in this recipe
145def get_all_cmd_params(d, cmd_type):
146 import string
147
148 param_type = cmd_type.upper() + "_PARAM_%s"
149 params = []
150
151 useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
152 for pkg in useradd_packages.split():
153 param = d.getVar(param_type % pkg, True)
154 if param:
155 params.append(param)
156
157 return "; ".join(params)
158
159# Adds the preinst script into generated packages
160fakeroot python populate_packages_prepend () {
161 def update_useradd_package(pkg):
162 bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
163
164 """
165 useradd preinst is appended here because pkg_preinst may be
166 required to execute on the target. Not doing so may cause
167 useradd preinst to be invoked twice, causing unwanted warnings.
168 """
169 preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
170 if not preinst:
171 preinst = '#!/bin/sh\n'
172 preinst += 'bbnote () {\n%s}\n' % d.getVar('bbnote', True)
173 preinst += 'bbwarn () {\n%s}\n' % d.getVar('bbwarn', True)
174 preinst += 'bbfatal () {\n%s}\n' % d.getVar('bbfatal', True)
175 preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True)
176 preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True)
177 preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True)
178 preinst += d.getVar('useradd_preinst', True)
179 d.setVar('pkg_preinst_%s' % pkg, preinst)
180
181 # RDEPENDS setup
182 rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
183 rdepends += ' ' + d.getVar('MLPREFIX') + 'base-passwd'
184 rdepends += ' ' + d.getVar('MLPREFIX') + 'shadow'
185 d.setVar("RDEPENDS_%s" % pkg, rdepends)
186
187 # Add the user/group preinstall scripts and RDEPENDS requirements
188 # to packages specified by USERADD_PACKAGES
189 if not bb.data.inherits_class('nativesdk', d):
190 useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
191 for pkg in useradd_packages.split():
192 update_useradd_package(pkg)
193}
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
new file mode 100644
index 0000000000..7aafe29a4a
--- /dev/null
+++ b/meta/classes/useradd_base.bbclass
@@ -0,0 +1,230 @@
1# This bbclass provides basic functionality for user/group settings.
2# This bbclass is intended to be inherited by useradd.bbclass and
3# extrausers.bbclass.
4
5# The following functions share the same basic logic:
6# *) Perform necessary checks before invoking the actual command
7# *) Invoke the actual command, retrying if necessary
8# *) Error out if an error occurs.
9
10# Note that before invoking these functions, make sure the global variable
11# PSEUDO is set up correctly.
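#
# For illustration, useradd.bbclass invokes these helpers as, e.g.
# (arguments hypothetical):
#   perform_groupadd "$SYSROOT" "$OPT --system mygroup" 10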
12
13perform_groupadd () {
14 local rootdir="$1"
15 local opts="$2"
16 local retries="$3"
17	bbnote "Performing groupadd with [$opts], retrying up to $retries times"
18 local groupname=`echo "$opts" | awk '{ print $NF }'`
19 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
20 if test "x$group_exists" = "x"; then
21 local count=0
22 while true; do
23 eval $PSEUDO groupadd $opts || true
24 group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
25 if test "x$group_exists" = "x"; then
26 bbwarn "groupadd command did not succeed. Retrying..."
27 sleep 1
28 else
29 break
30 fi
31 count=`expr $count + 1`
32 if test $count = $retries; then
33				bbfatal "Tried running groupadd command $retries times without success, giving up"
34 fi
35 done
36 else
37 bbwarn "group $groupname already exists, not re-creating it"
38 fi
39}
40
41perform_useradd () {
42 local rootdir="$1"
43 local opts="$2"
44 local retries="$3"
45	bbnote "Performing useradd with [$opts], retrying up to $retries times"
46 local username=`echo "$opts" | awk '{ print $NF }'`
47 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
48 if test "x$user_exists" = "x"; then
49 local count=0
50 while true; do
51 eval $PSEUDO useradd $opts || true
52 user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
53 if test "x$user_exists" = "x"; then
54 bbwarn "useradd command did not succeed. Retrying..."
55 sleep 1
56 else
57 break
58 fi
59 count=`expr $count + 1`
60 if test $count = $retries; then
61				bbfatal "Tried running useradd command $retries times without success, giving up"
62 fi
63 done
64 else
65 bbwarn "user $username already exists, not re-creating it"
66 fi
67}
68
69perform_groupmems () {
70 local rootdir="$1"
71 local opts="$2"
72 local retries="$3"
73	bbnote "Performing groupmems with [$opts], retrying up to $retries times"
74 local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
75 local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
76 bbnote "Running groupmems command with group $groupname and user $username"
77 # groupmems fails if /etc/gshadow does not exist
78 local gshadow=""
79 if [ -f $rootdir${sysconfdir}/gshadow ]; then
80 gshadow="yes"
81 else
82 gshadow="no"
83 touch $rootdir${sysconfdir}/gshadow
84 fi
85 local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
86 if test "x$mem_exists" = "x"; then
87 local count=0
88 while true; do
89 eval $PSEUDO groupmems $opts || true
90 mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
91 if test "x$mem_exists" = "x"; then
92 bbwarn "groupmems command did not succeed. Retrying..."
93 sleep 1
94 else
95 break
96 fi
97 count=`expr $count + 1`
98 if test $count = $retries; then
99 if test "x$gshadow" = "xno"; then
100 rm -f $rootdir${sysconfdir}/gshadow
101 rm -f $rootdir${sysconfdir}/gshadow-
102 fi
103				bbfatal "Tried running groupmems command $retries times without success, giving up"
104 fi
105 done
106 else
107 bbwarn "group $groupname already contains $username, not re-adding it"
108 fi
109 if test "x$gshadow" = "xno"; then
110 rm -f $rootdir${sysconfdir}/gshadow
111 rm -f $rootdir${sysconfdir}/gshadow-
112 fi
113}
114
115perform_groupdel () {
116 local rootdir="$1"
117 local opts="$2"
118 local retries="$3"
119	bbnote "Performing groupdel with [$opts], retrying up to $retries times"
120 local groupname=`echo "$opts" | awk '{ print $NF }'`
121 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
122 if test "x$group_exists" != "x"; then
123 local count=0
124 while true; do
125 eval $PSEUDO groupdel $opts || true
126 group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
127 if test "x$group_exists" != "x"; then
128 bbwarn "groupdel command did not succeed. Retrying..."
129 sleep 1
130 else
131 break
132 fi
133 count=`expr $count + 1`
134 if test $count = $retries; then
135				bbfatal "Tried running groupdel command $retries times without success, giving up"
136 fi
137 done
138 else
139 bbwarn "group $groupname doesn't exist, not removing it"
140 fi
141}
142
143perform_userdel () {
144 local rootdir="$1"
145 local opts="$2"
146 local retries="$3"
147	bbnote "Performing userdel with [$opts], retrying up to $retries times"
148 local username=`echo "$opts" | awk '{ print $NF }'`
149 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
150 if test "x$user_exists" != "x"; then
151 local count=0
152 while true; do
153 eval $PSEUDO userdel $opts || true
154 user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
155 if test "x$user_exists" != "x"; then
156 bbwarn "userdel command did not succeed. Retrying..."
157 sleep 1
158 else
159 break
160 fi
161 count=`expr $count + 1`
162 if test $count = $retries; then
163				bbfatal "Tried running userdel command $retries times without success, giving up"
164 fi
165 done
166 else
167 bbwarn "user $username doesn't exist, not removing it"
168 fi
169}
170
171perform_groupmod () {
172	# Other than the return value of groupmod, there's no simple way to tell whether
173	# the command succeeded, so temporarily disable the -e option
174 set +e
175 local rootdir="$1"
176 local opts="$2"
177 local retries="$3"
178	bbnote "Performing groupmod with [$opts], retrying up to $retries times"
179 local groupname=`echo "$opts" | awk '{ print $NF }'`
180 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
181 if test "x$group_exists" != "x"; then
182 local count=0
183 while true; do
184 eval $PSEUDO groupmod $opts
185 if test $? != 0; then
186 bbwarn "groupmod command did not succeed. Retrying..."
187 sleep 1
188 else
189 break
190 fi
191 count=`expr $count + 1`
192 if test $count = $retries; then
193				bbfatal "Tried running groupmod command $retries times without success, giving up"
194 fi
195 done
196 else
197 bbwarn "group $groupname doesn't exist, unable to modify it"
198 fi
199 set -e
200}
201
202perform_usermod () {
203	# For the same reason as groupmod, temporarily disable the -e option
204 set +e
205 local rootdir="$1"
206 local opts="$2"
207 local retries="$3"
208	bbnote "Performing usermod with [$opts], retrying up to $retries times"
209 local username=`echo "$opts" | awk '{ print $NF }'`
210 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
211 if test "x$user_exists" != "x"; then
212 local count=0
213 while true; do
214 eval $PSEUDO usermod $opts
215 if test $? != 0; then
216 bbwarn "usermod command did not succeed. Retrying..."
217 sleep 1
218 else
219 break
220 fi
221 count=`expr $count + 1`
222 if test $count = $retries; then
223				bbfatal "Tried running usermod command $retries times without success, giving up"
224 fi
225 done
226 else
227 bbwarn "user $username doesn't exist, unable to modify it"
228 fi
229 set -e
230}
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
new file mode 100644
index 0000000000..507e0f1c94
--- /dev/null
+++ b/meta/classes/utility-tasks.bbclass
@@ -0,0 +1,62 @@
1addtask listtasks
2do_listtasks[nostamp] = "1"
3python do_listtasks() {
4 import sys
5 # emit variables and shell functions
6 #bb.data.emit_env(sys.__stdout__, d)
7    # emit the metadata which isn't valid shell
8 for e in d.keys():
9 if d.getVarFlag(e, 'task'):
10 bb.plain("%s" % e)
11}
12
13CLEANFUNCS ?= ""
14
15T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
16addtask clean
17do_clean[nostamp] = "1"
18python do_clean() {
19 """clear the build and temp directories"""
20 dir = d.expand("${WORKDIR}")
21 bb.note("Removing " + dir)
22 oe.path.remove(dir)
23
24 dir = "%s.*" % bb.data.expand(d.getVar('STAMP'), d)
25 bb.note("Removing " + dir)
26 oe.path.remove(dir)
27
28 for f in (d.getVar('CLEANFUNCS', True) or '').split():
29 bb.build.exec_func(f, d)
30}
31
32addtask checkuri
33do_checkuri[nostamp] = "1"
34python do_checkuri() {
35 src_uri = (d.getVar('SRC_URI', True) or "").split()
36 if len(src_uri) == 0:
37 return
38
39 localdata = bb.data.createCopy(d)
40 bb.data.update_data(localdata)
41
42 try:
43 fetcher = bb.fetch2.Fetch(src_uri, localdata)
44 fetcher.checkstatus()
45 except bb.fetch2.BBFetchException, e:
46 raise bb.build.FuncFailed(e)
47}
48
49addtask checkuriall after do_checkuri
50do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri"
51do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}"
52do_checkuriall[nostamp] = "1"
53do_checkuriall() {
54 :
55}
56
57addtask fetchall after do_fetch
58do_fetchall[recrdeptask] = "do_fetchall do_fetch"
59do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}"
60do_fetchall() {
61 :
62}
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
new file mode 100644
index 0000000000..0a533afb1f
--- /dev/null
+++ b/meta/classes/utils.bbclass
@@ -0,0 +1,368 @@
1# For compatibility
2def base_path_join(a, *p):
3 return oe.path.join(a, *p)
4
5def base_path_relative(src, dest):
6 return oe.path.relative(src, dest)
7
8def base_path_out(path, d):
9 return oe.path.format_display(path, d)
10
11def base_read_file(filename):
12 return oe.utils.read_file(filename)
13
14def base_ifelse(condition, iftrue = True, iffalse = False):
15 return oe.utils.ifelse(condition, iftrue, iffalse)
16
17def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
18 return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)
19
20def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
21 return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
22
23def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
24 return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
25
26def base_contains(variable, checkvalues, truevalue, falsevalue, d):
27 return oe.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
28
29def base_both_contain(variable1, variable2, checkvalue, d):
30 return oe.utils.both_contain(variable1, variable2, checkvalue, d)
31
32def base_prune_suffix(var, suffixes, d):
33 return oe.utils.prune_suffix(var, suffixes, d)
34
35def oe_filter(f, str, d):
36 return oe.utils.str_filter(f, str, d)
37
38def oe_filter_out(f, str, d):
39 return oe.utils.str_filter_out(f, str, d)
40
41def machine_paths(d):
42 """List any existing machine specific filespath directories"""
43 machine = d.getVar("MACHINE", True)
44 filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
45 for basepath in d.getVar("FILESPATHBASE", True).split(":"):
46 for pkgpath in filespathpkg:
47 machinepath = os.path.join(basepath, pkgpath, machine)
48 if os.path.isdir(machinepath):
49 yield machinepath
50
51def is_machine_specific(d):
52 """Determine whether the current recipe is machine specific"""
53 machinepaths = set(machine_paths(d))
54 srcuri = d.getVar("SRC_URI", True).split()
55 for url in srcuri:
56        fetcher = bb.fetch2.Fetch([url], d)
57 if url.startswith("file://"):
58 if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
59 return True
60
61oe_soinstall() {
62 # Purpose: Install shared library file and
63 # create the necessary links
64 # Example:
65 #
66	# oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
67 #
68 #bbnote installing shared library $1 to $2
69 #
70 libname=`basename $1`
71 install -m 755 $1 $2/$libname
72 sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
73 solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
74 ln -sf $libname $2/$sonamelink
75 ln -sf $libname $2/$solink
76}
77
78oe_libinstall() {
79 # Purpose: Install a library, in all its forms
80 # Example
81 #
82 # oe_libinstall libltdl ${STAGING_LIBDIR}/
83 # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
84 dir=""
85 libtool=""
86 silent=""
87 require_static=""
88 require_shared=""
89 staging_install=""
90 while [ "$#" -gt 0 ]; do
91 case "$1" in
92 -C)
93 shift
94 dir="$1"
95 ;;
96 -s)
97 silent=1
98 ;;
99 -a)
100 require_static=1
101 ;;
102 -so)
103 require_shared=1
104 ;;
105 -*)
106 bbfatal "oe_libinstall: unknown option: $1"
107 ;;
108 *)
109 break;
110 ;;
111 esac
112 shift
113 done
114
115 libname="$1"
116 shift
117 destpath="$1"
118 if [ -z "$destpath" ]; then
119 bbfatal "oe_libinstall: no destination path specified"
120 fi
121 if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
122 then
123 staging_install=1
124 fi
125
126 __runcmd () {
127 if [ -z "$silent" ]; then
128 echo >&2 "oe_libinstall: $*"
129 fi
130 $*
131 }
132
133 if [ -z "$dir" ]; then
134 dir=`pwd`
135 fi
136
137 dotlai=$libname.lai
138
139 # Sanity check that the libname.lai is unique
140 number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
141 if [ $number_of_files -gt 1 ]; then
142 bbfatal "oe_libinstall: $dotlai is not unique in $dir"
143 fi
144
145
146 dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
147 olddir=`pwd`
148 __runcmd cd $dir
149
150 lafile=$libname.la
151
152	# If such a file doesn't exist, try cutting the version suffix
153	if [ ! -f "$lafile" ]; then
154		libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
155		lafile1=$libname1.la
156 if [ -f "$lafile1" ]; then
157 libname=$libname1
158 lafile=$lafile1
159 fi
160 fi
161
162 if [ -f "$lafile" ]; then
163 # libtool archive
164 eval `cat $lafile|grep "^library_names="`
165 libtool=1
166 else
167 library_names="$libname.so* $libname.dll.a $libname.*.dylib"
168 fi
169
170 __runcmd install -d $destpath/
171 dota=$libname.a
172 if [ -f "$dota" -o -n "$require_static" ]; then
173 rm -f $destpath/$dota
174 __runcmd install -m 0644 $dota $destpath/
175 fi
176 if [ -f "$dotlai" -a -n "$libtool" ]; then
177 rm -f $destpath/$libname.la
178 __runcmd install -m 0644 $dotlai $destpath/$libname.la
179 fi
180
181 for name in $library_names; do
182 files=`eval echo $name`
183 for f in $files; do
184 if [ ! -e "$f" ]; then
185 if [ -n "$libtool" ]; then
186 bbfatal "oe_libinstall: $dir/$f not found."
187 fi
188 elif [ -L "$f" ]; then
189 __runcmd cp -P "$f" $destpath/
190 elif [ ! -L "$f" ]; then
191 libfile="$f"
192 rm -f $destpath/$libfile
193 __runcmd install -m 0755 $libfile $destpath/
194 fi
195 done
196 done
197
198 if [ -z "$libfile" ]; then
199 if [ -n "$require_shared" ]; then
200 bbfatal "oe_libinstall: unable to locate shared library"
201 fi
202 elif [ -z "$libtool" ]; then
203 # special case hack for non-libtool .so.#.#.# links
204 baselibfile=`basename "$libfile"`
205 if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
206 sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
207 solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
208 if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
209 __runcmd ln -sf $baselibfile $destpath/$sonamelink
210 fi
211 __runcmd ln -sf $baselibfile $destpath/$solink
212 fi
213 fi
214
215 __runcmd cd "$olddir"
216}
217
218oe_machinstall() {
219 # Purpose: Install machine dependent files, if available
220 # If not available, check if there is a default
221 # If no default, just touch the destination
222 # Example:
223 # $1 $2 $3 $4
224 # oe_machinstall -m 0644 fstab ${D}/etc/fstab
225 #
226 # TODO: Check argument number?
227 #
228 filename=`basename $3`
229 dirname=`dirname $3`
230
231 for o in `echo ${OVERRIDES} | tr ':' ' '`; do
232 if [ -e $dirname/$o/$filename ]; then
233 bbnote $dirname/$o/$filename present, installing to $4
234 install $1 $2 $dirname/$o/$filename $4
235 return
236 fi
237 done
238# bbnote overrides specific file NOT present, trying default=$3...
239 if [ -e $3 ]; then
240 bbnote $3 present, installing to $4
241 install $1 $2 $3 $4
242 else
243 bbnote $3 NOT present, touching empty $4
244 touch $4
245 fi
246}
247
248create_cmdline_wrapper () {
249 # Create a wrapper script where commandline options are needed
250 #
251 # These are useful to work around relocation issues, by passing extra options
252 # to a program
253 #
254 # Usage: create_cmdline_wrapper FILENAME <extra-options>
255
256 cmd=$1
257 shift
258
259 echo "Generating wrapper script for $cmd"
260
261 mv $cmd $cmd.real
262 cmdname=`basename $cmd`.real
263 cat <<END >$cmd
264#!/bin/bash
265realpath=\`readlink -fn \$0\`
266exec -a $cmd \`dirname \$realpath\`/$cmdname $@ "\$@"
267END
268 chmod +x $cmd
269}
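# e.g. (hypothetical): create_cmdline_wrapper ${D}${bindir}/foo --config=${sysconfdir}/foo.conf
# leaves the real binary at ${D}${bindir}/foo.real and makes the wrapper pass
# the extra option on every invocation.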
270
271create_wrapper () {
272 # Create a wrapper script where extra environment variables are needed
273 #
274 # These are useful to work around relocation issues, by setting environment
275 # variables which point to paths in the filesystem.
276 #
277 # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
278
279 cmd=$1
280 shift
281
282 echo "Generating wrapper script for $cmd"
283
284 mv $cmd $cmd.real
285 cmdname=`basename $cmd`
286 cat <<END >$cmd
287#!/bin/bash
288realpath=\`readlink -fn \$0\`
289export $@
290exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
291END
292 chmod +x $cmd
293}
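# e.g. (hypothetical): create_wrapper ${D}${bindir}/foo FOO_DATADIR=${datadir}/foo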
294
295def check_app_exists(app, d):
296 app = d.expand(app)
297    path = d.getVar('PATH', True)
298 return bool(bb.utils.which(path, app))
299
300def explode_deps(s):
301 return bb.utils.explode_deps(s)
302
303def base_set_filespath(path, d):
304 filespath = []
305 extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
306 # Don't prepend empty strings to the path list
307 if extrapaths != "":
308 path = extrapaths.split(":") + path
309 # The ":" ensures we have an 'empty' override
310 overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":")
311 overrides.reverse()
312 for o in overrides:
313 for p in path:
314 if p != "":
315 filespath.append(os.path.join(p, o))
316 return ":".join(filespath)
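# For illustration (hypothetical values): with path = ["/meta/recipes/foo/files"]
# and FILESOVERRIDES = "qemuarm:armv5te", the reversed override list is
# ['armv5te', 'qemuarm', ''], giving
# "/meta/recipes/foo/files/armv5te:/meta/recipes/foo/files/qemuarm:/meta/recipes/foo/files/".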
317
318def extend_variants(d, var, extend, delim=':'):
319 """Return a string of all bb class extend variants for the given extend"""
320 variants = []
321 whole = d.getVar(var, True) or ""
322 for ext in whole.split():
323 eext = ext.split(delim)
324 if len(eext) > 1 and eext[0] == extend:
325 variants.append(eext[1])
326 return " ".join(variants)
327
328def multilib_pkg_extend(d, pkg):
329 variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split()
330 if not variants:
331 return pkg
332 pkgs = pkg
333 for v in variants:
334 pkgs = pkgs + " " + v + "-" + pkg
335 return pkgs
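# e.g. with MULTILIB_VARIANTS = "lib32" (illustrative), multilib_pkg_extend(d, "bash")
# returns "bash lib32-bash".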
336
337def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
338 """Return a string of all ${var} in all multilib tune configuration"""
339 values = []
340 value = d.getVar(var, True) or ""
341 if value != "":
342 if need_split:
343 for item in value.split(delim):
344 values.append(item)
345 else:
346 values.append(value)
347 variants = d.getVar("MULTILIB_VARIANTS", True) or ""
348 for item in variants.split():
349 localdata = bb.data.createCopy(d)
350 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
351 localdata.setVar("OVERRIDES", overrides)
352 bb.data.update_data(localdata)
353 value = localdata.getVar(var, True) or ""
354 if value != "":
355 if need_split:
356 for item in value.split(delim):
357 values.append(item)
358 else:
359 values.append(value)
360 if unique:
361 #we do this to keep order as much as possible
362 ret = []
363 for value in values:
364 if not value in ret:
365 ret.append(value)
366 else:
367 ret = values
368 return " ".join(ret)
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
new file mode 100644
index 0000000000..c7db08ceeb
--- /dev/null
+++ b/meta/classes/vala.bbclass
@@ -0,0 +1,18 @@
1# Vala has problems with multiple concurrent invocations
2PARALLEL_MAKE = ""
3
4# Vala needs vala-native
5DEPENDS += "vala-native"
6DEPENDS_virtclass-native += "vala-native"
7
8# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
9export STAGING_DATADIR
10# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
11export XDG_DATA_DIRS = "${STAGING_DATADIR}"
12
13# Package additional files
14FILES_${PN}-dev += "\
15 ${datadir}/vala/vapi/*.vapi \
16 ${datadir}/vala/vapi/*.deps \
17 ${datadir}/gir-1.0 \
18"
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
new file mode 100644
index 0000000000..3a221e7082
--- /dev/null
+++ b/meta/classes/waf.bbclass
@@ -0,0 +1,13 @@
1waf_do_configure() {
2 ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
3}
4
5waf_do_compile() {
6 ${S}/waf build ${PARALLEL_MAKE}
7}
8
9waf_do_install() {
10 ${S}/waf install --destdir=${D}
11}
12
13EXPORT_FUNCTIONS do_configure do_compile do_install