Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 36
-rw-r--r--  meta/classes/archiver.bbclass | 368
-rw-r--r--  meta/classes/autotools-brokensep.bbclass | 5
-rw-r--r--  meta/classes/autotools.bbclass | 271
-rw-r--r--  meta/classes/autotools_stage.bbclass | 2
-rw-r--r--  meta/classes/base.bbclass | 661
-rw-r--r--  meta/classes/bin_package.bbclass | 36
-rw-r--r--  meta/classes/binconfig.bbclass | 63
-rw-r--r--  meta/classes/blacklist.bbclass | 45
-rw-r--r--  meta/classes/boot-directdisk.bbclass | 182
-rw-r--r--  meta/classes/bootimg.bbclass | 240
-rw-r--r--  meta/classes/bugzilla.bbclass | 187
-rw-r--r--  meta/classes/buildhistory.bbclass | 684
-rw-r--r--  meta/classes/buildstats.bbclass | 289
-rw-r--r--  meta/classes/ccache.bbclass | 8
-rw-r--r--  meta/classes/chrpath.bbclass | 115
-rw-r--r--  meta/classes/clutter.bbclass | 22
-rw-r--r--  meta/classes/cmake.bbclass | 115
-rw-r--r--  meta/classes/cml1.bbclass | 73
-rw-r--r--  meta/classes/copyleft_compliance.bbclass | 64
-rw-r--r--  meta/classes/copyleft_filter.bbclass | 62
-rw-r--r--  meta/classes/core-image.bbclass | 79
-rw-r--r--  meta/classes/cpan-base.bbclass | 55
-rw-r--r--  meta/classes/cpan.bbclass | 55
-rw-r--r--  meta/classes/cpan_build.bbclass | 50
-rw-r--r--  meta/classes/cross-canadian.bbclass | 102
-rw-r--r--  meta/classes/cross.bbclass | 80
-rw-r--r--  meta/classes/crosssdk.bbclass | 35
-rw-r--r--  meta/classes/debian.bbclass | 125
-rw-r--r--  meta/classes/deploy.bbclass | 10
-rw-r--r--  meta/classes/devshell.bbclass | 33
-rw-r--r--  meta/classes/distro_features_check.bbclass | 28
-rw-r--r--  meta/classes/distrodata.bbclass | 916
-rw-r--r--  meta/classes/distutils-base.bbclass | 4
-rw-r--r--  meta/classes/distutils-common-base.bbclass | 24
-rw-r--r--  meta/classes/distutils-native-base.bbclass | 3
-rw-r--r--  meta/classes/distutils-tools.bbclass | 77
-rw-r--r--  meta/classes/distutils.bbclass | 81
-rw-r--r--  meta/classes/distutils3-base.bbclass | 8
-rw-r--r--  meta/classes/distutils3-native-base.bbclass | 4
-rw-r--r--  meta/classes/distutils3.bbclass | 98
-rw-r--r--  meta/classes/externalsrc.bbclass | 53
-rw-r--r--  meta/classes/extrausers.bbclass | 65
-rw-r--r--  meta/classes/fontcache.bbclass | 40
-rw-r--r--  meta/classes/gconf.bbclass | 70
-rw-r--r--  meta/classes/gettext.bbclass | 19
-rw-r--r--  meta/classes/gnome.bbclass | 5
-rw-r--r--  meta/classes/gnomebase.bbclass | 30
-rw-r--r--  meta/classes/grub-efi.bbclass | 141
-rw-r--r--  meta/classes/gsettings.bbclass | 37
-rw-r--r--  meta/classes/gtk-doc.bbclass | 23
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 62
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 83
-rw-r--r--  meta/classes/gummiboot.bbclass | 114
-rw-r--r--  meta/classes/gzipnative.bbclass | 5
-rw-r--r--  meta/classes/icecc.bbclass | 325
-rw-r--r--  meta/classes/image-live.bbclass | 18
-rw-r--r--  meta/classes/image-mklibs.bbclass | 71
-rw-r--r--  meta/classes/image-prelink.bbclass | 33
-rw-r--r--  meta/classes/image-swab.bbclass | 94
-rw-r--r--  meta/classes/image-vmdk.bbclass | 35
-rw-r--r--  meta/classes/image.bbclass | 408
-rw-r--r--  meta/classes/image_types.bbclass | 154
-rw-r--r--  meta/classes/image_types_uboot.bbclass | 23
-rw-r--r--  meta/classes/insane.bbclass | 1005
-rw-r--r--  meta/classes/insserv.bbclass | 5
-rw-r--r--  meta/classes/kernel-arch.bbclass | 60
-rw-r--r--  meta/classes/kernel-grub.bbclass | 91
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 187
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 416
-rw-r--r--  meta/classes/kernel.bbclass | 502
-rw-r--r--  meta/classes/lib_package.bbclass | 7
-rw-r--r--  meta/classes/libc-common.bbclass | 36
-rw-r--r--  meta/classes/libc-package.bbclass | 390
-rw-r--r--  meta/classes/license.bbclass | 373
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 32
-rw-r--r--  meta/classes/logging.bbclass | 72
-rw-r--r--  meta/classes/meta.bbclass | 4
-rw-r--r--  meta/classes/metadata_scm.bbclass | 82
-rw-r--r--  meta/classes/migrate_localcount.bbclass | 46
-rw-r--r--  meta/classes/mime.bbclass | 56
-rw-r--r--  meta/classes/mirrors.bbclass | 78
-rw-r--r--  meta/classes/module-base.bbclass | 18
-rw-r--r--  meta/classes/module.bbclass | 32
-rw-r--r--  meta/classes/multilib.bbclass | 141
-rw-r--r--  meta/classes/multilib_global.bbclass | 47
-rw-r--r--  meta/classes/multilib_header.bbclass | 47
-rw-r--r--  meta/classes/native.bbclass | 164
-rw-r--r--  meta/classes/nativesdk.bbclass | 94
-rw-r--r--  meta/classes/oelint.bbclass | 174
-rw-r--r--  meta/classes/own-mirrors.bbclass | 12
-rw-r--r--  meta/classes/package.bbclass | 2019
-rw-r--r--  meta/classes/package_deb.bbclass | 317
-rw-r--r--  meta/classes/package_ipk.bbclass | 261
-rw-r--r--  meta/classes/package_rpm.bbclass | 731
-rw-r--r--  meta/classes/package_tar.bbclass | 69
-rw-r--r--  meta/classes/packagedata.bbclass | 26
-rw-r--r--  meta/classes/packagegroup.bbclass | 47
-rw-r--r--  meta/classes/packageinfo.bbclass | 22
-rw-r--r--  meta/classes/patch.bbclass | 187
-rw-r--r--  meta/classes/perlnative.bbclass | 3
-rw-r--r--  meta/classes/pixbufcache.bbclass | 70
-rw-r--r--  meta/classes/pkgconfig.bbclass | 2
-rw-r--r--  meta/classes/populate_sdk.bbclass | 7
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 337
-rw-r--r--  meta/classes/populate_sdk_deb.bbclass | 13
-rw-r--r--  meta/classes/populate_sdk_ipk.bbclass | 3
-rw-r--r--  meta/classes/populate_sdk_rpm.bbclass | 16
-rw-r--r--  meta/classes/prexport.bbclass | 58
-rw-r--r--  meta/classes/primport.bbclass | 21
-rw-r--r--  meta/classes/prserv.bbclass | 33
-rw-r--r--  meta/classes/ptest.bbclass | 62
-rw-r--r--  meta/classes/python-dir.bbclass | 5
-rw-r--r--  meta/classes/python3native.bbclass | 7
-rw-r--r--  meta/classes/pythonnative.bbclass | 6
-rw-r--r--  meta/classes/qemu.bbclass | 35
-rw-r--r--  meta/classes/qmake2.bbclass | 27
-rw-r--r--  meta/classes/qmake_base.bbclass | 119
-rw-r--r--  meta/classes/qt4e.bbclass | 24
-rw-r--r--  meta/classes/qt4x11.bbclass | 14
-rw-r--r--  meta/classes/recipe_sanity.bbclass | 168
-rw-r--r--  meta/classes/relocatable.bbclass | 7
-rw-r--r--  meta/classes/report-error.bbclass | 66
-rw-r--r--  meta/classes/rm_work.bbclass | 99
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 24
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 38
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 42
-rw-r--r--  meta/classes/sanity.bbclass | 800
-rw-r--r--  meta/classes/scons.bbclass | 15
-rw-r--r--  meta/classes/sdl.bbclass | 6
-rw-r--r--  meta/classes/setuptools.bbclass | 8
-rw-r--r--  meta/classes/setuptools3.bbclass | 8
-rw-r--r--  meta/classes/sip.bbclass | 63
-rw-r--r--  meta/classes/siteconfig.bbclass | 33
-rw-r--r--  meta/classes/siteinfo.bbclass | 151
-rw-r--r--  meta/classes/spdx.bbclass | 321
-rw-r--r--  meta/classes/sstate.bbclass | 798
-rw-r--r--  meta/classes/staging.bbclass | 121
-rw-r--r--  meta/classes/syslinux.bbclass | 187
-rw-r--r--  meta/classes/systemd.bbclass | 198
-rw-r--r--  meta/classes/terminal.bbclass | 94
-rw-r--r--  meta/classes/testimage-auto.bbclass | 23
-rw-r--r--  meta/classes/testimage.bbclass | 232
-rw-r--r--  meta/classes/tinderclient.bbclass | 368
-rw-r--r--  meta/classes/toaster.bbclass | 331
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 116
-rw-r--r--  meta/classes/typecheck.bbclass | 12
-rw-r--r--  meta/classes/uboot-config.bbclass | 61
-rw-r--r--  meta/classes/update-alternatives.bbclass | 267
-rw-r--r--  meta/classes/update-rc.d.bbclass | 130
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 272
-rw-r--r--  meta/classes/useradd.bbclass | 211
-rw-r--r--  meta/classes/useradd_base.bbclass | 230
-rw-r--r--  meta/classes/utility-tasks.bbclass | 69
-rw-r--r--  meta/classes/utils.bbclass | 368
-rw-r--r--  meta/classes/vala.bbclass | 21
-rw-r--r--  meta/classes/waf.bbclass | 13
157 files changed, 21986 insertions, 0 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
new file mode 100644
index 0000000..d41dd4b
--- /dev/null
+++ b/meta/classes/allarch.bbclass
@@ -0,0 +1,36 @@
1#
2# This class is used for architecture-independent recipes/data files (usually scripts)
3#
4
5# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
6# point elsewhere after these changes.
7STAGING_DIR_HOST := "${STAGING_DIR_HOST}"
8
9PACKAGE_ARCH = "all"
10
11python () {
12 # Allow this class to be included but overridden - only set
13 # the values if we're still "all" package arch.
14 if d.getVar("PACKAGE_ARCH") == "all":
15 # No need for virtual/libc or a cross compiler
16 d.setVar("INHIBIT_DEFAULT_DEPS","1")
17
18        # Set these to a common set of values; we shouldn't be using them other than for WORKDIR directory
19 # naming anyway
20 d.setVar("TARGET_ARCH", "allarch")
21 d.setVar("TARGET_OS", "linux")
22 d.setVar("TARGET_CC_ARCH", "none")
23 d.setVar("TARGET_LD_ARCH", "none")
24 d.setVar("TARGET_AS_ARCH", "none")
25 d.setVar("TARGET_FPU", "")
26 d.setVar("TARGET_PREFIX", "")
27 d.setVar("PACKAGE_EXTRA_ARCHS", "")
28 d.setVar("SDK_ARCH", "none")
29 d.setVar("SDK_CC_ARCH", "none")
30
31 # No need to do shared library processing or debug symbol handling
32 d.setVar("EXCLUDE_FROM_SHLIBS", "1")
33 d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
34 d.setVar("INHIBIT_PACKAGE_STRIP", "1")
35}
36
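As a usage sketch (the recipe name and file below are hypothetical, not from this commit), an architecture-independent recipe would simply inherit the class and install its files:

    SUMMARY = "Architecture-independent helper scripts"
    SRC_URI = "file://helper.sh"

    inherit allarch

    do_install() {
        install -d ${D}${bindir}
        install -m 0755 ${WORKDIR}/helper.sh ${D}${bindir}
    }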
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
new file mode 100644
index 0000000..8d8e7c4
--- /dev/null
+++ b/meta/classes/archiver.bbclass
@@ -0,0 +1,368 @@
1# ex:ts=4:sw=4:sts=4:et
2# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
3#
4# This bbclass is used for creating archives for:
5# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
6# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
7# 3) configured source: ARCHIVER_MODE[src] = "configured"
8# 4) The patches between do_unpack and do_patch:
9# ARCHIVER_MODE[diff] = "1"
10# And you can set the ones that you'd like to exclude from the diff:
11# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
12# 5) The environment data, similar to 'bitbake -e recipe':
13# ARCHIVER_MODE[dumpdata] = "1"
14# 6) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
15# 7) Whether to output the .src.rpm package:
16# ARCHIVER_MODE[srpm] = "1"
17# 8) Filter by license: recipes whose license is in
18#    COPYLEFT_LICENSE_INCLUDE will be included, and those in
19#    COPYLEFT_LICENSE_EXCLUDE will be excluded.
20# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
21# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
22# 9) The recipe types that will be archived:
23# COPYLEFT_RECIPE_TYPES = 'target'
24#
25
26# Don't filter the license by default
27COPYLEFT_LICENSE_INCLUDE ?= ''
28COPYLEFT_LICENSE_EXCLUDE ?= ''
29# Create archives for all the recipe types
30COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
31inherit copyleft_filter
32
33ARCHIVER_MODE[srpm] ?= "0"
34ARCHIVER_MODE[src] ?= "patched"
35ARCHIVER_MODE[diff] ?= "0"
36ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
37ARCHIVER_MODE[dumpdata] ?= "0"
38ARCHIVER_MODE[recipe] ?= "0"
39
40DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
41ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
42ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
43ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
44
45do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
46do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
47do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
48
49# This is a convenience for the shell scripts to use
50
51
52python () {
53 pn = d.getVar('PN', True)
54
55 if d.getVar('COPYLEFT_LICENSE_INCLUDE', True) or \
56 d.getVar('COPYLEFT_LICENSE_EXCLUDE', True):
57 included, reason = copyleft_should_include(d)
58 if not included:
59 bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
60 return
61 else:
62 bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
63
64 ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
65 ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
66 ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)
67
68 if ar_src == "original":
69 d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
70 elif ar_src == "patched":
71 d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
72 elif ar_src == "configured":
73        # We can't use "addtask do_ar_configured after do_configure" since it
74        # will cause the deptask of do_populate_sysroot to run no matter what
75        # archives we need, so we add the depends here.
76 d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
77 d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
78 elif ar_src:
79 bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)
80
81 if ar_dumpdata == "1":
82 d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn)
83
84 if ar_recipe == "1":
85 d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)
86
87 # Output the srpm package
88 ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True)
89 if ar_srpm == "1":
90 if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm':
91 d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
92 if ar_dumpdata == "1":
93 d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
94 if ar_recipe == "1":
95 d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn)
96 if ar_src == "original":
97 d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn)
98 elif ar_src == "patched":
99 d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
100 elif ar_src == "configured":
101 d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
102
103    # The gcc stuff uses shared source
104 flag = d.getVarFlag("do_unpack", "stamp-base", True)
105 if flag:
106 if ar_src in [ 'original', 'patched' ]:
107 ar_outdir = os.path.join(d.getVar('ARCHIVER_TOPDIR', True), 'work-shared')
108 d.setVar('ARCHIVER_OUTDIR', ar_outdir)
109 d.setVarFlag('do_ar_original', 'stamp-base', flag)
110 d.setVarFlag('do_ar_patched', 'stamp-base', flag)
111 d.setVarFlag('do_unpack_and_patch', 'stamp-base', flag)
112 d.setVarFlag('do_ar_original', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
113 d.setVarFlag('do_unpack_and_patch', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
114 d.setVarFlag('do_ar_patched', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
115 d.setVarFlag('create_diff_gz', 'vardepsexclude', 'PF')
116 d.setVarFlag('create_tarball', 'vardepsexclude', 'PF')
117
118 flag_clean = d.getVarFlag('do_unpack', 'stamp-base-clean', True)
119 if flag_clean:
120 d.setVarFlag('do_ar_original', 'stamp-base-clean', flag_clean)
121 d.setVarFlag('do_ar_patched', 'stamp-base-clean', flag_clean)
122 d.setVarFlag('do_unpack_and_patch', 'stamp-base-clean', flag_clean)
123}
124
125# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
126# Files in SRC_URI are copied directly, anything that's a directory
127# (e.g. git repositories) is "unpacked" and then put into a tarball.
128python do_ar_original() {
129
130 import shutil, tarfile, tempfile
131
132 if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original":
133 return
134
135 ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
136 bb.note('Archiving the original source...')
137 fetch = bb.fetch2.Fetch([], d)
138 for url in fetch.urls:
139 local = fetch.localpath(url)
140 if os.path.isfile(local):
141 shutil.copy(local, ar_outdir)
142 elif os.path.isdir(local):
143 basename = os.path.basename(local)
144
145 tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
146 fetch.unpack(tmpdir, (url,))
147
148 os.chdir(tmpdir)
149 tarname = os.path.join(ar_outdir, basename + '.tar.gz')
150 tar = tarfile.open(tarname, 'w:gz')
151 tar.add('.')
152 tar.close()
153
154 # Emit patch series files for 'original'
155 bb.note('Writing patch series files...')
156 for patch in src_patches(d):
157 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
158 patchdir = parm.get('patchdir')
159 if patchdir:
160 series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_'))
161 else:
162 series = os.path.join(ar_outdir, 'series')
163
164 with open(series, 'a') as s:
165 s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
166}
167
168python do_ar_patched() {
169
170 if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched':
171 return
172
173 # Get the ARCHIVER_OUTDIR before we reset the WORKDIR
174 ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
175 bb.note('Archiving the patched source...')
176 d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
177    # The gcc stuff uses shared source
178 flag = d.getVarFlag('do_unpack', 'stamp-base', True)
179 if flag:
180 create_tarball(d, d.getVar('S', True), 'patched', ar_outdir, 'gcc')
181 else:
182 create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
183}
184
185python do_ar_configured() {
186 import shutil
187
188 ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
189 if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
190 bb.note('Archiving the configured source...')
191        # libtool-native's do_configure will remove
192        # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
193        # do_configure; instead we archive the already configured
194        # ${S}.
195 if d.getVar('PN', True) != 'libtool-native':
196 # Change the WORKDIR to make do_configure run in another dir.
197 d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
198 if bb.data.inherits_class('kernel-yocto', d):
199 bb.build.exec_func('do_kernel_configme', d)
200 if bb.data.inherits_class('cmake', d):
201 bb.build.exec_func('do_generate_toolchain_file', d)
202 prefuncs = d.getVarFlag('do_configure', 'prefuncs', True)
203 for func in (prefuncs or '').split():
204 if func != "sysroot_cleansstate":
205 bb.build.exec_func(func, d)
206 bb.build.exec_func('do_configure', d)
207 postfuncs = d.getVarFlag('do_configure', 'postfuncs', True)
208 for func in (postfuncs or '').split():
209 if func != "do_qa_configure":
210 bb.build.exec_func(func, d)
211 srcdir = d.getVar('S', True)
212 builddir = d.getVar('B', True)
213 if srcdir != builddir:
214 if os.path.exists(builddir):
215 oe.path.copytree(builddir, os.path.join(srcdir, \
216 'build.%s.ar_configured' % d.getVar('PF', True)))
217 create_tarball(d, srcdir, 'configured', ar_outdir)
218}
219
220def create_tarball(d, srcdir, suffix, ar_outdir, pf=None):
221 """
222 create the tarball from srcdir
223 """
224 import tarfile
225
226 bb.utils.mkdirhier(ar_outdir)
227 if pf:
228 tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % (pf, suffix))
229 else:
230 tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \
231 (d.getVar('PF', True), suffix))
232
233 srcdir = srcdir.rstrip('/')
234 dirname = os.path.dirname(srcdir)
235 basename = os.path.basename(srcdir)
236 os.chdir(dirname)
237 bb.note('Creating %s' % tarname)
238 tar = tarfile.open(tarname, 'w:gz')
239 tar.add(basename)
240 tar.close()
241
242# Create a .diff.gz between source.orig and source
243def create_diff_gz(d, src_orig, src, ar_outdir):
244
245 import subprocess
246
247 if not os.path.isdir(src) or not os.path.isdir(src_orig):
248 return
249
250    # The diff --exclude option can't exclude files by path, so we copy
251 # the patched source, and remove the files that we'd like to
252 # exclude.
253 src_patched = src + '.patched'
254 oe.path.copyhardlinktree(src, src_patched)
255 for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split():
256 bb.utils.remove(os.path.join(src_orig, i), recurse=True)
257 bb.utils.remove(os.path.join(src_patched, i), recurse=True)
258
259 dirname = os.path.dirname(src)
260 basename = os.path.basename(src)
261 os.chdir(dirname)
262 out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True))
263 diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
264 subprocess.call(diff_cmd, shell=True)
265 bb.utils.remove(src_patched, recurse=True)
266
267# Run do_unpack and do_patch
268python do_unpack_and_patch() {
269 if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \
270 [ 'patched', 'configured'] and \
271 d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
272 return
273
274 ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
275
276    # Change the WORKDIR to make do_unpack and do_patch run in another dir.
277 d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
278
279 # The kernel source is ready after do_validate_branches
280 if bb.data.inherits_class('kernel-yocto', d):
281 bb.build.exec_func('do_unpack', d)
282 bb.build.exec_func('do_kernel_checkout', d)
283 bb.build.exec_func('do_validate_branches', d)
284 else:
285 bb.build.exec_func('do_unpack', d)
286
287 # Save the original source for creating the patches
288 if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
289 src = d.getVar('S', True).rstrip('/')
290 src_orig = '%s.orig' % src
291 oe.path.copytree(src, src_orig)
292 bb.build.exec_func('do_patch', d)
293 # Create the patches
294 if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
295 bb.note('Creating diff gz...')
296 create_diff_gz(d, src_orig, src, ar_outdir)
297 bb.utils.remove(src_orig, recurse=True)
298}
299
300python do_ar_recipe () {
301 """
302 archive the recipe, including .bb and .inc.
303 """
304 import re
305 import shutil
306
307 require_re = re.compile( r"require\s+(.+)" )
308 include_re = re.compile( r"include\s+(.+)" )
309 bbfile = d.getVar('FILE', True)
310 outdir = os.path.join(d.getVar('WORKDIR', True), \
311 '%s-recipe' % d.getVar('PF', True))
312 bb.utils.mkdirhier(outdir)
313 shutil.copy(bbfile, outdir)
314
315 dirname = os.path.dirname(bbfile)
316 bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
317 f = open(bbfile, 'r')
318 for line in f.readlines():
319 incfile = None
320 if require_re.match(line):
321 incfile = require_re.match(line).group(1)
322 elif include_re.match(line):
323 incfile = include_re.match(line).group(1)
324 if incfile:
325 incfile = bb.data.expand(incfile, d)
326 incfile = bb.utils.which(bbpath, incfile)
327 if incfile:
328 shutil.copy(incfile, outdir)
329
330 create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True))
331 bb.utils.remove(outdir, recurse=True)
332}
333
334python do_dumpdata () {
335 """
336 dump environment data to ${PF}-showdata.dump
337 """
338
339 dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
340 '%s-showdata.dump' % d.getVar('PF', True))
341 bb.note('Dumping metadata into %s' % dumpfile)
342 f = open(dumpfile, 'w')
343 # emit variables and shell functions
344 bb.data.emit_env(f, d, True)
345 # emit the metadata which isn't valid shell
346 for e in d.keys():
347 if bb.data.getVarFlag(e, 'python', d):
348 f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, True)))
349 f.close()
350}
351
352SSTATETASKS += "do_deploy_archives"
353do_deploy_archives () {
354 echo "Deploying source archive files ..."
355}
356python do_deploy_archives_setscene () {
357 sstate_setscene(d)
358}
359do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
360do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
361
362addtask do_ar_original after do_unpack
363addtask do_unpack_and_patch after do_patch
364addtask do_ar_patched after do_unpack_and_patch
365addtask do_ar_configured after do_unpack_and_patch
366addtask do_dumpdata
367addtask do_ar_recipe
368addtask do_deploy_archives before do_build
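To use this class, a configuration file such as local.conf would typically inherit it and pick the modes documented in the header comment above; a minimal sketch:

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "original"
    ARCHIVER_MODE[diff] = "1"
    ARCHIVER_MODE[dumpdata] = "1"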
diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass
new file mode 100644
index 0000000..71cf97a
--- /dev/null
+++ b/meta/classes/autotools-brokensep.bbclass
@@ -0,0 +1,5 @@
1# Autotools class for recipes where a separate build dir doesn't work.
2# Ideally we should fix the software so it does work, since standard
3# autotools supports this.
4inherit autotools
5B = "${S}"
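Since the class sets B = "${S}" itself, an affected recipe only has to swap its inherit; a hypothetical one-line change:

    inherit autotools-brokensep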
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
new file mode 100644
index 0000000..c60ba10
--- /dev/null
+++ b/meta/classes/autotools.bbclass
@@ -0,0 +1,271 @@
1def autotools_dep_prepend(d):
2 if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
3 return ''
4
5 pn = d.getVar('PN', True)
6 deps = ''
7
8 if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
9 return deps
10 deps += 'autoconf-native automake-native '
11
12 if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
13 deps += 'libtool-native '
14 if not bb.data.inherits_class('native', d) \
15 and not bb.data.inherits_class('nativesdk', d) \
16 and not bb.data.inherits_class('cross', d) \
17 and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
18 deps += 'libtool-cross '
19
20 return deps + 'gnu-config-native '
21
22EXTRA_OEMAKE = ""
23
24DEPENDS_prepend = "${@autotools_dep_prepend(d)}"
25
26inherit siteinfo
27
28# Space-separated list of shell scripts with variables defined to supply test
29# results for autoconf tests we cannot run at build time.
30export CONFIG_SITE = "${@siteinfo_get_files(d)}"
31
32acpaths = "default"
33EXTRA_AUTORECONF = "--exclude=autopoint"
34
35export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
36
37# When building tools for use at build time, it's recommended that the build
38# system use these variables when cross-compiling.
39# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
40export CPP_FOR_BUILD = "${BUILD_CPP}"
41export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
42
43export CC_FOR_BUILD = "${BUILD_CC}"
44export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
45
46export CXX_FOR_BUILD = "${BUILD_CXX}"
47export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
48
49export LD_FOR_BUILD = "${BUILD_LD}"
50export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
51
52def autotools_set_crosscompiling(d):
53 if not bb.data.inherits_class('native', d):
54 return " cross_compiling=yes"
55 return ""
56
57def append_libtool_sysroot(d):
58 # Only supply libtool sysroot option for non-native packages
59 if not bb.data.inherits_class('native', d):
60 return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
61 return ""
62
63# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
64
65CONFIGUREOPTS = " --build=${BUILD_SYS} \
66 --host=${HOST_SYS} \
67 --target=${TARGET_SYS} \
68 --prefix=${prefix} \
69 --exec_prefix=${exec_prefix} \
70 --bindir=${bindir} \
71 --sbindir=${sbindir} \
72 --libexecdir=${libexecdir} \
73 --datadir=${datadir} \
74 --sysconfdir=${sysconfdir} \
75 --sharedstatedir=${sharedstatedir} \
76 --localstatedir=${localstatedir} \
77 --libdir=${libdir} \
78 --includedir=${includedir} \
79 --oldincludedir=${oldincludedir} \
80 --infodir=${infodir} \
81 --mandir=${mandir} \
82 --disable-silent-rules \
83 ${CONFIGUREOPT_DEPTRACK} \
84 ${@append_libtool_sysroot(d)}"
85CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking"
86
87
88oe_runconf () {
89 cfgscript="${S}/configure"
90 if [ -x "$cfgscript" ] ; then
91 bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
92 set +e
93 ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
94 if [ "$?" != "0" ]; then
95 echo "Configure failed. The contents of all config.log files follows to aid debugging"
96 find ${S} -name config.log -print -exec cat {} \;
97 bbfatal "oe_runconf failed"
98 fi
99 set -e
100 else
101 bbfatal "no configure script found at $cfgscript"
102 fi
103}
104
105AUTOTOOLS_AUXDIR ?= "${S}"
106
107CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
108
109autotools_preconfigure() {
110 if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
111 if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
112 if [ "${S}" != "${B}" ]; then
113 echo "Previously configured separate build directory detected, cleaning ${B}"
114 rm -rf ${B}
115 mkdir ${B}
116 else
117 # At least remove the .la files since automake won't automatically
118 # regenerate them even if CFLAGS/LDFLAGS are different
119 cd ${S}; find ${S} -name \*.la -delete
120 fi
121 fi
122 fi
123}
124
125autotools_postconfigure(){
126 if [ -n "${CONFIGURESTAMPFILE}" ]; then
127 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
128 fi
129}
130
131EXTRACONFFUNCS ??= ""
132
133do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
134do_configure[postfuncs] += "autotools_postconfigure"
135
136ACLOCALDIR = "${B}/aclocal-copy"
137
138python autotools_copy_aclocals () {
139 s = d.getVar("S", True)
140 if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
141 if not d.getVar("AUTOTOOLS_COPYACLOCAL"):
142 return
143
144 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
145 pn = d.getVar("PN", True)
146 aclocaldir = d.getVar("ACLOCALDIR", True)
147 oe.path.remove(aclocaldir)
148 bb.utils.mkdirhier(aclocaldir)
149 configuredeps = []
150 for dep in taskdepdata:
151 data = taskdepdata[dep]
152 if data[1] == "do_configure" and data[0] != pn:
153 configuredeps.append(data[0])
154
155 cp = []
156 for c in configuredeps:
157 if c.endswith("-native"):
158 manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
159 elif c.startswith("nativesdk-"):
160 manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}-%s.populate_sysroot" % c)
161 elif c.endswith("-cross") or c.endswith("-cross-initial") or c.endswith("-crosssdk") or c.endswith("-crosssdk-initial"):
162 continue
163 else:
164 manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
165 try:
166 f = open(manifest, "r")
167 for l in f:
168 if "/aclocal/" in l and l.strip().endswith(".m4"):
169 cp.append(l.strip())
170 except:
171 bb.warn("%s not found" % manifest)
172
173 for c in cp:
174 t = os.path.join(aclocaldir, os.path.basename(c))
175 if not os.path.exists(t):
176 os.symlink(c, t)
177}
178autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH BB_TASKDEPDATA"
179
180autotools_do_configure() {
181 # WARNING: gross hack follows:
182 # An autotools built package generally needs these scripts, however only
183 # automake or libtoolize actually install the current versions of them.
184 # This is a problem in builds that do not use libtool or automake, in the case
185 # where we -need- the latest version of these scripts. e.g. running a build
186 # for a package whose autotools are old, on an x86_64 machine, which the old
187 # config.sub does not support. Work around this by installing them manually
188 # regardless.
189 ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
190 rm -f `dirname $ac`/configure
191 done )
192 if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
193 olddir=`pwd`
194 cd ${S}
195 ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
196 if [ x"${acpaths}" = xdefault ]; then
197 acpaths=
198 for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
199 grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
200 acpaths="$acpaths -I $i"
201 done
202 else
203 acpaths="${acpaths}"
204 fi
205 AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
206 automake --version
207 echo "AUTOV is $AUTOV"
208 if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
209 ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
210 fi
211 # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
212 # like it was auto-generated. Work around this by blowing it away
213 # by hand, unless the package specifically asked not to run aclocal.
214 if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
215 rm -f aclocal.m4
216 fi
217 if [ -e configure.in ]; then
218 CONFIGURE_AC=configure.in
219 else
220 CONFIGURE_AC=configure.ac
221 fi
222 if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
223 if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
224 : do nothing -- we still have an old unmodified configure.ac
225 else
226 bbnote Executing glib-gettextize --force --copy
227 echo "no" | glib-gettextize --force --copy
228 fi
229 else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
230 # We'd call gettextize here if it wasn't so broken...
231 cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
232 if [ -d ${S}/po/ ]; then
233 cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
234 if [ ! -e ${S}/po/remove-potcdate.sin ]; then
235 cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
236 fi
237 fi
238 for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
239 for j in `find ${S} -name $i | grep -v aclocal-copy`; do
240 rm $j
241 done
242 done
243 fi
244 fi
245 mkdir -p m4
246 if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
247 bbnote Executing intltoolize --copy --force --automake
248 intltoolize --copy --force --automake
249 fi
250 bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
251 ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed."
252 cd $olddir
253 fi
254 if [ -e ${S}/configure ]; then
255 oe_runconf
256 else
257 bbnote "nothing to configure"
258 fi
259}
260
261autotools_do_install() {
262 oe_runmake 'DESTDIR=${D}' install
263 # Info dir listing isn't interesting at this point so remove it if it exists.
264 if [ -e "${D}${infodir}/dir" ]; then
265 rm -f ${D}${infodir}/dir
266 fi
267}
268
269inherit siteconfig
270
271EXPORT_FUNCTIONS do_configure do_install
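A typical consumer of this class only needs to inherit it and pass extra configure arguments through EXTRA_OECONF; a hedged sketch of a hypothetical recipe (the name and URL are illustrative only):

    SUMMARY = "Example autotools-based package"
    SRC_URI = "http://example.com/releases/foo-1.0.tar.gz"

    inherit autotools gettext

    EXTRA_OECONF = "--disable-static --without-x"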
diff --git a/meta/classes/autotools_stage.bbclass b/meta/classes/autotools_stage.bbclass
new file mode 100644
index 0000000..b3c41e4
--- /dev/null
+++ b/meta/classes/autotools_stage.bbclass
@@ -0,0 +1,2 @@
1inherit autotools
2
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
new file mode 100644
index 0000000..f4f5321
--- /dev/null
+++ b/meta/classes/base.bbclass
@@ -0,0 +1,661 @@
1BB_DEFAULT_TASK ?= "build"
2CLASSOVERRIDE ?= "class-target"
3
4inherit patch
5inherit staging
6
7inherit mirrors
8inherit utils
9inherit utility-tasks
10inherit metadata_scm
11inherit logging
12
13OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
14OE_IMPORTS[type] = "list"
15
16def oe_import(d):
17 import sys
18
19 bbpath = d.getVar("BBPATH", True).split(":")
20 sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
21
22 def inject(name, value):
23 """Make a python object accessible from the metadata"""
24 if hasattr(bb.utils, "_context"):
25 bb.utils._context[name] = value
26 else:
27 __builtins__[name] = value
28
29 import oe.data
30 for toimport in oe.data.typed_value("OE_IMPORTS", d):
31 imported = __import__(toimport)
32 inject(toimport.split(".", 1)[0], imported)
33
34 return ""
35
36# We need the oe module name space early (before INHERITs get added)
37OE_IMPORTED := "${@oe_import(d)}"
38
39def lsb_distro_identifier(d):
40 adjust = d.getVar('LSB_DISTRO_ADJUST', True)
41 adjust_func = None
42 if adjust:
43 try:
44 adjust_func = globals()[adjust]
45 except KeyError:
46 pass
47 return oe.lsb.distro_identifier(adjust_func)
48
49die() {
50 bbfatal "$*"
51}
52
53oe_runmake_call() {
54 bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
55 ${MAKE} ${EXTRA_OEMAKE} "$@"
56}
57
58oe_runmake() {
59 oe_runmake_call "$@" || die "oe_runmake failed"
60}
61
62
63def base_dep_prepend(d):
64 #
65 # Ideally this will check a flag so we will operate properly in
66    # the case where host == build == target; for now we don't work in
67 # that case though.
68 #
69
70 deps = ""
71 # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
72 # we need that built is the responsibility of the patch function / class, not
73 # the application.
74 if not d.getVar('INHIBIT_DEFAULT_DEPS'):
75 if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
76 deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
77 return deps
78
79BASEDEPENDS = "${@base_dep_prepend(d)}"
80
81DEPENDS_prepend="${BASEDEPENDS} "
82
83FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
84# THISDIR only works properly with immediate expansion as it has to run
85# in the context of the location it's used in (:=)
86THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
87
88def extra_path_elements(d):
89 path = ""
90 elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
91 for e in elements:
92 path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
93 return path
94
95PATH_prepend = "${@extra_path_elements(d)}"
96
97addtask fetch
98do_fetch[dirs] = "${DL_DIR}"
99do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
100python base_do_fetch() {
101
102 src_uri = (d.getVar('SRC_URI', True) or "").split()
103 if len(src_uri) == 0:
104 return
105
106 try:
107 fetcher = bb.fetch2.Fetch(src_uri, d)
108 fetcher.download()
109 except bb.fetch2.BBFetchException as e:
110 raise bb.build.FuncFailed(e)
111}
112
113addtask unpack after do_fetch
114do_unpack[dirs] = "${WORKDIR}"
115do_unpack[cleandirs] = "${S}/patches"
116python base_do_unpack() {
117 src_uri = (d.getVar('SRC_URI', True) or "").split()
118 if len(src_uri) == 0:
119 return
120
121 rootdir = d.getVar('WORKDIR', True)
122
123 try:
124 fetcher = bb.fetch2.Fetch(src_uri, d)
125 fetcher.unpack(rootdir)
126 except bb.fetch2.BBFetchException as e:
127 raise bb.build.FuncFailed(e)
128}
129
130def pkgarch_mapping(d):
131 # Compatibility mappings of TUNE_PKGARCH (opt in)
132 if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
133 if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
134 d.setVar("TUNE_PKGARCH", "armv7a")
135
136def preferred_ml_updates(d):
137 # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
138 # we need to mirror these variables in the multilib case;
139 multilibs = d.getVar('MULTILIBS', True) or ""
140 if not multilibs:
141 return
142
143 prefixes = []
144 for ext in multilibs.split():
145 eext = ext.split(':')
146 if len(eext) > 1 and eext[0] == 'multilib':
147 prefixes.append(eext[1])
148
149 versions = []
150 providers = []
151 for v in d.keys():
152 if v.startswith("PREFERRED_VERSION_"):
153 versions.append(v)
154 if v.startswith("PREFERRED_PROVIDER_"):
155 providers.append(v)
156
157 for v in versions:
158 val = d.getVar(v, False)
159 pkg = v.replace("PREFERRED_VERSION_", "")
160 if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
161 continue
162 if 'cross-canadian' in pkg:
163 for p in prefixes:
164 localdata = bb.data.createCopy(d)
165 override = ":virtclass-multilib-" + p
166 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
167 bb.data.update_data(localdata)
168 newname = localdata.expand(v)
169 if newname != v:
170 newval = localdata.expand(val)
171 d.setVar(newname, newval)
172 # Avoid future variable key expansion
173 vexp = d.expand(v)
174 if v != vexp and d.getVar(v, False):
175 d.renameVar(v, vexp)
176 continue
177 for p in prefixes:
178 newname = "PREFERRED_VERSION_" + p + "-" + pkg
179 if not d.getVar(newname, False):
180 d.setVar(newname, val)
181
182 for prov in providers:
183 val = d.getVar(prov, False)
184 pkg = prov.replace("PREFERRED_PROVIDER_", "")
185 if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
186 continue
187 if 'cross-canadian' in pkg:
188 for p in prefixes:
189 localdata = bb.data.createCopy(d)
190 override = ":virtclass-multilib-" + p
191 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
192 bb.data.update_data(localdata)
193 newname = localdata.expand(prov)
194 if newname != prov:
195 newval = localdata.expand(val)
196 d.setVar(newname, newval)
197 # Avoid future variable key expansion
198 provexp = d.expand(prov)
199 if prov != provexp and d.getVar(prov, False):
200 d.renameVar(prov, provexp)
201 continue
202 virt = ""
203 if pkg.startswith("virtual/"):
204 pkg = pkg.replace("virtual/", "")
205 virt = "virtual/"
206 for p in prefixes:
207 if pkg != "kernel":
208 newval = p + "-" + val
209
210 # implement variable keys
211 localdata = bb.data.createCopy(d)
212 override = ":virtclass-multilib-" + p
213 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
214 bb.data.update_data(localdata)
215 newname = localdata.expand(prov)
216 if newname != prov and not d.getVar(newname, False):
217 d.setVar(newname, localdata.expand(newval))
218
219 # implement alternative multilib name
220 newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
221 if not d.getVar(newname, False):
222 d.setVar(newname, newval)
223 # Avoid future variable key expansion
224 provexp = d.expand(prov)
225 if prov != provexp and d.getVar(prov, False):
226 d.renameVar(prov, provexp)
227
228
229 mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
230 extramp = []
231 for p in mp:
232 if p.endswith(("-native", "-crosssdk")) or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
233 continue
234 virt = ""
235 if p.startswith("virtual/"):
236 p = p.replace("virtual/", "")
237 virt = "virtual/"
238 for pref in prefixes:
239 extramp.append(virt + pref + "-" + p)
240 d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
241
242
243def get_layers_branch_rev(d):
244 layers = (d.getVar("BBLAYERS", True) or "").split()
245 layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
246 base_get_metadata_git_branch(i, None).strip(), \
247 base_get_metadata_git_revision(i, None)) \
248 for i in layers]
249 i = len(layers_branch_rev)-1
250 p1 = layers_branch_rev[i].find("=")
251 s1 = layers_branch_rev[i][p1:]
252 while i > 0:
253 p2 = layers_branch_rev[i-1].find("=")
254 s2= layers_branch_rev[i-1][p2:]
255 if s1 == s2:
256 layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
257 i -= 1
258 else:
259 i -= 1
260 p1 = layers_branch_rev[i].find("=")
261 s1= layers_branch_rev[i][p1:]
262 return layers_branch_rev
263
264
265BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
266BUILDCFG_FUNCS[type] = "list"
267
268def buildcfg_vars(d):
269 statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
270 for var in statusvars:
271 value = d.getVar(var, True)
272 if value is not None:
273 yield '%-17s = "%s"' % (var, value)
274
275def buildcfg_neededvars(d):
276 needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
277 pesteruser = []
278 for v in needed_vars:
279 val = d.getVar(v, True)
280 if not val or val == 'INVALID':
281 pesteruser.append(v)
282
283 if pesteruser:
284 bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
285
286addhandler base_eventhandler
287base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted"
288python base_eventhandler() {
289 if isinstance(e, bb.event.ConfigParsed):
290 e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
291 e.data.setVar('BB_VERSION', bb.__version__)
292 pkgarch_mapping(e.data)
293 preferred_ml_updates(e.data)
294 oe.utils.features_backfill("DISTRO_FEATURES", e.data)
295 oe.utils.features_backfill("MACHINE_FEATURES", e.data)
296
297 if isinstance(e, bb.event.BuildStarted):
298 localdata = bb.data.createCopy(e.data)
299 bb.data.update_data(localdata)
300 statuslines = []
301 for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
302 g = globals()
303 if func not in g:
304 bb.warn("Build configuration function '%s' does not exist" % func)
305 else:
306 flines = g[func](localdata)
307 if flines:
308 statuslines.extend(flines)
309
310 statusheader = e.data.getVar('BUILDCFG_HEADER', True)
311 bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
312}
313
314addtask configure after do_patch
315do_configure[dirs] = "${S} ${B}"
316do_configure[deptask] = "do_populate_sysroot"
317base_do_configure() {
318 :
319}
320
321addtask compile after do_configure
322do_compile[dirs] = "${S} ${B}"
323base_do_compile() {
324 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
325 oe_runmake || die "make failed"
326 else
327 bbnote "nothing to compile"
328 fi
329}
330
331addtask install after do_compile
332do_install[dirs] = "${D} ${S} ${B}"
333# Remove and re-create ${D} so that it is guaranteed to be empty
334do_install[cleandirs] = "${D}"
335
336base_do_install() {
337 :
338}
339
340base_do_package() {
341 :
342}
343
344addtask build after do_populate_sysroot
345do_build = ""
346do_build[func] = "1"
347do_build[noexec] = "1"
348do_build[recrdeptask] += "do_deploy"
349do_build () {
350 :
351}
352
353def set_packagetriplet(d):
354 archs = []
355 tos = []
356 tvs = []
357
358 archs.append(d.getVar("PACKAGE_ARCHS", True).split())
359 tos.append(d.getVar("TARGET_OS", True))
360 tvs.append(d.getVar("TARGET_VENDOR", True))
361
362 def settriplet(d, varname, archs, tos, tvs):
363 triplets = []
364 for i in range(len(archs)):
365 for arch in archs[i]:
366 triplets.append(arch + tvs[i] + "-" + tos[i])
367 triplets.reverse()
368 d.setVar(varname, " ".join(triplets))
369
370 settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
371
372 variants = d.getVar("MULTILIB_VARIANTS", True) or ""
373 for item in variants.split():
374 localdata = bb.data.createCopy(d)
375 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
376 localdata.setVar("OVERRIDES", overrides)
377 bb.data.update_data(localdata)
378
379 archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
380 tos.append(localdata.getVar("TARGET_OS", True))
381 tvs.append(localdata.getVar("TARGET_VENDOR", True))
382
383 settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
384
385python () {
386 import string, re
387
388 # Handle PACKAGECONFIG
389 #
390 # These take the form:
391 #
392 # PACKAGECONFIG ??= "<default options>"
393 # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
394 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
395 if pkgconfigflags:
396 pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
397 pn = d.getVar("PN", True)
398 mlprefix = d.getVar("MLPREFIX", True)
399
400 def expandFilter(appends, extension, prefix):
401 appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
402 newappends = []
403 for a in appends:
404 if a.endswith("-native") or a.endswith("-cross"):
405 newappends.append(a)
406 elif a.startswith("virtual/"):
407 subs = a.split("/", 1)[1]
408 newappends.append("virtual/" + prefix + subs + extension)
409 else:
410 if a.startswith(prefix):
411 newappends.append(a + extension)
412 else:
413 newappends.append(prefix + a + extension)
414 return newappends
415
416 def appendVar(varname, appends):
417 if not appends:
418 return
419 if varname.find("DEPENDS") != -1:
420 if pn.startswith("nativesdk-"):
421 appends = expandFilter(appends, "", "nativesdk-")
422 if pn.endswith("-native"):
423 appends = expandFilter(appends, "-native", "")
424 if mlprefix:
425 appends = expandFilter(appends, "", mlprefix)
426 varname = d.expand(varname)
427 d.appendVar(varname, " " + " ".join(appends))
428
429 extradeps = []
430 extrardeps = []
431 extraconf = []
432 for flag, flagval in sorted(pkgconfigflags.items()):
433 if flag == "defaultval":
434 continue
435 items = flagval.split(",")
436 num = len(items)
437 if num > 4:
438 bb.error("Only enable,disable,depend,rdepend can be specified!")
439
440 if flag in pkgconfig:
441 if num >= 3 and items[2]:
442 extradeps.append(items[2])
443 if num >= 4 and items[3]:
444 extrardeps.append(items[3])
445 if num >= 1 and items[0]:
446 extraconf.append(items[0])
447 elif num >= 2 and items[1]:
448 extraconf.append(items[1])
449 appendVar('DEPENDS', extradeps)
450 appendVar('RDEPENDS_${PN}', extrardeps)
451 if bb.data.inherits_class('cmake', d):
452 appendVar('EXTRA_OECMAKE', extraconf)
453 else:
454 appendVar('EXTRA_OECONF', extraconf)
455
456    # If PRINC is set, try to increase the PR value by the amount specified.
457    # The PR server is now the preferred way to handle PR changes based on
458    # the checksum of the recipe (including bbappends). PRINC is now
459    # obsolete, so issue a warning to the user.
460 princ = d.getVar('PRINC', True)
461 if princ and princ != "0":
462 bb.warn("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
463 pr = d.getVar('PR', True)
464 pr_prefix = re.search("\D+",pr)
465 prval = re.search("\d+",pr)
466 if pr_prefix is None or prval is None:
467 bb.error("Unable to analyse format of PR variable: %s" % pr)
468 nval = int(prval.group(0)) + int(princ)
469 pr = pr_prefix.group(0) + str(nval) + pr[prval.end():]
470 d.setVar('PR', pr)
471
472 pn = d.getVar('PN', True)
473 license = d.getVar('LICENSE', True)
474 if license == "INVALID":
475 bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
476
477 if bb.data.inherits_class('license', d):
478 unmatched_license_flag = check_license_flags(d)
479 if unmatched_license_flag:
480 bb.debug(1, "Skipping %s because it has a restricted license not"
481 " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
482 raise bb.parse.SkipPackage("because it has a restricted license not"
483 " whitelisted in LICENSE_FLAGS_WHITELIST")
484
485 # If we're building a target package we need to use fakeroot (pseudo)
486 # in order to capture permissions, owners, groups and special files
487 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
488 d.setVarFlag('do_unpack', 'umask', '022')
489 d.setVarFlag('do_configure', 'umask', '022')
490 d.setVarFlag('do_compile', 'umask', '022')
491 d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
492 d.setVarFlag('do_install', 'fakeroot', 1)
493 d.setVarFlag('do_install', 'umask', '022')
494 d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
495 d.setVarFlag('do_package', 'fakeroot', 1)
496 d.setVarFlag('do_package', 'umask', '022')
497 d.setVarFlag('do_package_setscene', 'fakeroot', 1)
498 d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
499 d.setVarFlag('do_devshell', 'fakeroot', 1)
500 d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
501 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
502 if not source_mirror_fetch:
503 need_host = d.getVar('COMPATIBLE_HOST', True)
504 if need_host:
505 import re
506 this_host = d.getVar('HOST_SYS', True)
507 if not re.match(need_host, this_host):
508 raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
509
510 need_machine = d.getVar('COMPATIBLE_MACHINE', True)
511 if need_machine:
512 import re
513 compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
514 for m in compat_machines:
515 if re.match(need_machine, m):
516 break
517 else:
518 raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
519
520
521 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
522
523 check_license = False if pn.startswith("nativesdk-") else True
524 for t in ["-native", "-cross", "-cross-initial", "-cross-intermediate",
525 "-crosssdk-intermediate", "-crosssdk", "-crosssdk-initial",
526 "-cross-canadian-" + d.getVar('TRANSLATED_TARGET_ARCH', True)]:
527 if pn.endswith(t):
528 check_license = False
529
530 if check_license and bad_licenses:
531 whitelist = []
532 for lic in bad_licenses:
533 for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]:
534 whitelist.extend((d.getVar(w + lic, True) or "").split())
535 spdx_license = return_spdx(d, lic)
536 if spdx_license:
537 whitelist.extend((d.getVar('HOSTTOOLS_WHITELIST_%s' % spdx_license, True) or "").split())
538 if not pn in whitelist:
539 recipe_license = d.getVar('LICENSE', True)
540 pkgs = d.getVar('PACKAGES', True).split()
541 skipped_pkgs = []
542 unskipped_pkgs = []
543 for pkg in pkgs:
544 if incompatible_license(d, bad_licenses, pkg):
545 skipped_pkgs.append(pkg)
546 else:
547 unskipped_pkgs.append(pkg)
548 all_skipped = skipped_pkgs and not unskipped_pkgs
549 if unskipped_pkgs:
550 for pkg in skipped_pkgs:
551 bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license)
552 d.setVar('LICENSE_EXCLUSION-' + pkg, 1)
553 for pkg in unskipped_pkgs:
554 bb.debug(1, "INCLUDING the package " + pkg)
555 elif all_skipped or incompatible_license(d, bad_licenses):
556 bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, recipe_license))
557 raise bb.parse.SkipPackage("incompatible with license %s" % recipe_license)
558
559 srcuri = d.getVar('SRC_URI', True)
560 # Svn packages should DEPEND on subversion-native
561 if "svn://" in srcuri:
562 d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
563
564 # Git packages should DEPEND on git-native
565 if "git://" in srcuri:
566 d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
567
568 # Mercurial packages should DEPEND on mercurial-native
569 elif "hg://" in srcuri:
570 d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
571
572 # OSC packages should DEPEND on osc-native
573 elif "osc://" in srcuri:
574 d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
575
576    # *.lz4 should depend on lz4-native for unpacking
577 # Not endswith because of "*.patch.lz4;patch=1". Need bb.fetch.decodeurl in future
578 if '.lz4' in srcuri:
579 d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
580
581    # *.xz should depend on xz-native for unpacking
582 # Not endswith because of "*.patch.xz;patch=1". Need bb.fetch.decodeurl in future
583 if '.xz' in srcuri:
584 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
585
586 # unzip-native should already be staged before unpacking ZIP recipes
587 if ".zip" in srcuri:
588 d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
589
590 # file is needed by rpm2cpio.sh
591 if ".src.rpm" in srcuri:
592 d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')
593
594 set_packagetriplet(d)
595
596 # 'multimachine' handling
597 mach_arch = d.getVar('MACHINE_ARCH', True)
598 pkg_arch = d.getVar('PACKAGE_ARCH', True)
599
600 if (pkg_arch == mach_arch):
601 # Already machine specific - nothing further to do
602 return
603
604 #
605 # We always try to scan SRC_URI for urls with machine overrides
606 # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
607 #
608 override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
609 if override != '0':
610 paths = []
611 fpaths = (d.getVar('FILESPATH', True) or '').split(':')
612 machine = d.getVar('MACHINE', True)
613 for p in fpaths:
614 if os.path.basename(p) == machine and os.path.isdir(p):
615 paths.append(p)
616
617 if len(paths) != 0:
618 for s in srcuri.split():
619 if not s.startswith("file://"):
620 continue
621 fetcher = bb.fetch2.Fetch([s], d)
622 local = fetcher.localpath(s)
623 for mp in paths:
624 if local.startswith(mp):
625 #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
626 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
627 return
628
629 packages = d.getVar('PACKAGES', True).split()
630 for pkg in packages:
631 pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)
632
633 # We could look for != PACKAGE_ARCH here but how to choose
634 # if multiple differences are present?
635 # Look through PACKAGE_ARCHS for the priority order?
636 if pkgarch and pkgarch == mach_arch:
637 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
638 bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
639}
640
641addtask cleansstate after do_clean
642python do_cleansstate() {
643 sstate_clean_cachefiles(d)
644}
645
646addtask cleanall after do_cleansstate
647python do_cleanall() {
648 src_uri = (d.getVar('SRC_URI', True) or "").split()
649 if len(src_uri) == 0:
650 return
651
652 try:
653 fetcher = bb.fetch2.Fetch(src_uri, d)
654 fetcher.clean()
655    except bb.fetch2.BBFetchException as e:
656 raise bb.build.FuncFailed(e)
657}
658do_cleanall[nostamp] = "1"
659
660
661EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
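The PACKAGECONFIG handling in the anonymous python above is driven entirely from recipe metadata; a hypothetical recipe fragment showing the four comma-separated fields (configure option when enabled, option when disabled, build-time depends, runtime depends):

    PACKAGECONFIG ??= "ssl"
    PACKAGECONFIG[ssl] = "--with-ssl,--without-ssl,openssl,"
    PACKAGECONFIG[zlib] = "--with-zlib,--without-zlib,zlib,"

With this, the enabled "ssl" feature appends --with-ssl to EXTRA_OECONF and openssl to DEPENDS, while the disabled "zlib" feature contributes only --without-zlib.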
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
new file mode 100644
index 0000000..a52b75b
--- /dev/null
+++ b/meta/classes/bin_package.bbclass
@@ -0,0 +1,36 @@
1#
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4#
5# Common variables and tasks for binary package recipes.
6# Basic principle:
7# * The files have been unpacked to ${S} by base.bbclass
8# * Skip do_configure and do_compile
9# * Use do_install to install the files to ${D}
10#
11# Note:
12# The "subdir" parameter in the SRC_URI is useful when the input package
13# is an rpm, ipk, deb and so on; for example:
14#
15# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
16#
17# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
18# they would be in ${WORKDIR}.
19#
20
21# Skip the unwanted steps
22do_configure[noexec] = "1"
23do_compile[noexec] = "1"
24
25# Install the files to ${D}
26bin_package_do_install () {
27 # Do it carefully
28 [ -d "${S}" ] || exit 1
29 cd ${S} || exit 1
30 tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
31 | tar --no-same-owner -xpf - -C ${D}
32}
33
34FILES_${PN} = "/"
35
36EXPORT_FUNCTIONS do_install
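A minimal usage sketch for this class (the recipe name and URL are illustrative only):

# foo-bin_1.0.bb - repackage a prebuilt binary rpm with bin_package
LICENSE = "CLOSED"
SRC_URI = "http://example.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
inherit bin_package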
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
new file mode 100644
index 0000000..7158c8c
--- /dev/null
+++ b/meta/classes/binconfig.bbclass
@@ -0,0 +1,63 @@
1FILES_${PN}-dev += "${bindir}/*-config"
2
3# The namespaces can clash here, hence the two-step replace
4def get_binconfig_mangle(d):
5 s = "-e ''"
6 if not bb.data.inherits_class('native', d):
7 optional_quote = r"\(\"\?\)"
8 s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
9 s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
10 s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
11 s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
12 s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
13 s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
14 s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
15 s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
16 s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
17 s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
18 s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
19 s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
20 s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
21 s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
22 s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
23 s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
24 if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
25 s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
26
27 return s
28
29BINCONFIG_GLOB ?= "*-config"
30
31PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
32
33binconfig_package_preprocess () {
34 for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
35 sed -i \
36 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
37 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
38 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
39 -e 's:${STAGING_DATADIR}:${datadir}:' \
40 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
41 $config
42 done
43 for lafile in `find ${PKGD} -name "*.la"` ; do
44 sed -i \
45 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
46 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
47 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
48 -e 's:${STAGING_DATADIR}:${datadir}:' \
49 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
50 $lafile
51 done
52}
53
54SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
55
56binconfig_sysroot_preprocess () {
57 for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do
58 configname=`basename $config`
59 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
60 cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
61 chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
62 done
63}
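To illustrate the two-step mangle (assuming the default prefix of /usr; the script contents are hypothetical), a target foo-config is rewritten as follows in the sysroot copy:

#   prefix="/usr"            ->  prefix="${STAGING_DIR_HOST}/usr"
#   libs="-L/usr/lib -lfoo"  ->  libs="-L${STAGING_LIBDIR} -lfoo"
#   cflags="-I/usr/include"  ->  cflags="-I${STAGING_INCDIR}"
# binconfig_package_preprocess later reverses any staging paths back to
# target paths before the files are packaged.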
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
new file mode 100644
index 0000000..a0141a8
--- /dev/null
+++ b/meta/classes/blacklist.bbclass
@@ -0,0 +1,45 @@
1# Anonymous support class, originally from Angstrom
2#
3# To use the blacklist, a distribution should include this
4# class in the INHERIT_DISTRO
5#
6# ANGSTROM_BLACKLIST is no longer used; instead use a table of
7# recipes in PNBLACKLIST
8#
9# Features:
10#
11# * To add a package to the blacklist, set:
12# PNBLACKLIST[pn] = "message"
13#
14
15# Cope with PNBLACKLIST flags for multilib case
16addhandler blacklist_multilib_eventhandler
17blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
18python blacklist_multilib_eventhandler() {
19 multilibs = e.data.getVar('MULTILIBS', True)
20 if not multilibs:
21 return
22
23 # this block has been copied from base.bbclass so keep it in sync
24 prefixes = []
25 for ext in multilibs.split():
26 eext = ext.split(':')
27 if len(eext) > 1 and eext[0] == 'multilib':
28 prefixes.append(eext[1])
29
30 blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
31 for pkg, reason in blacklists.items():
32 if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
33 continue
34 for p in prefixes:
35 newpkg = p + "-" + pkg
36 if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
37 e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
38}
39
40python () {
41 blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
42
43 if blacklist:
44 raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
45}
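A usage sketch (the recipe name and reason are illustrative):

INHERIT += "blacklist"
PNBLACKLIST[foo] = "Fails to build with the current toolchain"
# With e.g. MULTILIBS = "multilib:lib32", the event handler above also
# sets PNBLACKLIST[lib32-foo] to the same reason automatically.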
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
new file mode 100644
index 0000000..88e5c52
--- /dev/null
+++ b/meta/classes/boot-directdisk.bbclass
@@ -0,0 +1,182 @@
1# boot-directdisk.bbclass
2# (loosely based on bootimg.bbclass, Copyright (C) 2004, Advanced Micro Devices, Inc.)
3#
4# Create an image which can be placed directly onto a hard disk using dd and then
5# booted.
6#
7# This uses syslinux. extlinux would have been nice but required the ext2/3
8# partition to be mounted. grub requires running itself as part of the install
9# process.
10#
11# The end result is a 512-byte boot sector populated with an MBR and partition table
12# followed by an msdos fat16 partition containing syslinux and a linux kernel
13# completed by the ext2/3 rootfs.
14#
15# We have to push the msdos partition table size > 16MB so FAT16 is used, as parted
16# won't touch fat12 partitions.
17
18# External variables needed
19
20# ${ROOTFS} - the rootfs image to incorporate
21
22do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
23 syslinux:do_populate_sysroot \
24 syslinux-native:do_populate_sysroot \
25 parted-native:do_populate_sysroot \
26 mtools-native:do_populate_sysroot "
27
28PACKAGES = " "
29EXCLUDE_FROM_WORLD = "1"
30
31BOOTDD_VOLUME_ID ?= "boot"
32BOOTDD_EXTRA_SPACE ?= "16384"
33
34EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
35EFI_PROVIDER ?= "grub-efi"
36EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
37
38# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
39# contain "efi". This way legacy is supported by default if neither is
40# specified, maintaining the original behavior.
41def pcbios(d):
42 pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
43 if pcbios == "0":
44 pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
45 return pcbios
46
47def pcbios_class(d):
48 if d.getVar("PCBIOS", True) == "1":
49 return "syslinux"
50 return ""
51
52PCBIOS = "${@pcbios(d)}"
53PCBIOS_CLASS = "${@pcbios_class(d)}"
54
55inherit ${PCBIOS_CLASS}
56inherit ${EFI_CLASS}
57
58# Get the build_syslinux_cfg() function from the syslinux class
59
60AUTO_SYSLINUXCFG = "1"
61DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
62SYSLINUX_ROOT ?= "root=/dev/sda2"
63SYSLINUX_TIMEOUT ?= "10"
64
65IS_VMDK = '${@base_contains("IMAGE_FSTYPES", "vmdk", "true", "false", d)}'
66
67boot_direct_populate() {
68 dest=$1
69 install -d $dest
70
71 # Install bzImage and initrd in DEST for all loaders to use.
72 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz
73
74 if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
75 install -m 0644 ${INITRD} $dest/initrd
76 fi
77
78}
79
80build_boot_dd() {
81 HDDDIR="${S}/hdd/boot"
82 HDDIMG="${S}/hdd.image"
83 IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
84
85 boot_direct_populate $HDDDIR
86
87 if [ "${PCBIOS}" = "1" ]; then
88 syslinux_hddimg_populate $HDDDIR
89 fi
90 if [ "${EFI}" = "1" ]; then
91 efi_hddimg_populate $HDDDIR
92 fi
93
94 if [ "${IS_VMDK}" = "true" ]; then
95 if [ "x${AUTO_SYSLINUXMENU}" = "x1" ] ; then
96 install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 ${HDDDIR}${SYSLINUXDIR}/vesamenu.c32
97 if [ "x${SYSLINUX_SPLASH}" != "x" ] ; then
98 install -m 0644 ${SYSLINUX_SPLASH} ${HDDDIR}${SYSLINUXDIR}/splash.lss
99 fi
100 fi
101 fi
102
103 BLOCKS=`du -bks $HDDDIR | cut -f 1`
104 BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
105
106 # Ensure total sectors is an integral number of sectors per
107 # track or mcopy will complain. Sectors are 512 bytes, and we
108 # generate images with 32 sectors per track. This calculation is
109 # done in blocks, thus the mod by 16 instead of 32.
110 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
111
112 mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
113 mcopy -i $HDDIMG -s $HDDDIR/* ::/
114
115 if [ "${PCBIOS}" = "1" ]; then
116 syslinux_hdddirect_install $HDDIMG
117 fi
118 chmod 644 $HDDIMG
119
120 ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
121 TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
122 END1=`expr $BLOCKS \* 1024`
123 END2=`expr $END1 + 512`
124 END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
125
126 echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
127 rm -rf $IMAGE
128 dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
129
130 parted $IMAGE mklabel msdos
131 parted $IMAGE mkpart primary fat16 0 ${END1}B
132 parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
133 parted $IMAGE set 1 boot on
134
135 parted $IMAGE print
136
137 awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
138 dd of=$IMAGE bs=1 seek=440 conv=notrunc
139
140 OFFSET=`expr $END2 / 512`
141 if [ "${PCBIOS}" = "1" ]; then
142 dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
143 fi
144
145 dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
146 dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
147
148 cd ${DEPLOY_DIR_IMAGE}
149 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
150 ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
151}
152
153python do_bootdirectdisk() {
154 validate_disk_signature(d)
155 if d.getVar("PCBIOS", True) == "1":
156 bb.build.exec_func('build_syslinux_cfg', d)
157 if d.getVar("EFI", True) == "1":
158 bb.build.exec_func('build_efi_cfg', d)
159 bb.build.exec_func('build_boot_dd', d)
160}
161
162def generate_disk_signature():
163 import uuid
164
165 signature = str(uuid.uuid4())[:8]
166
167 if signature != '00000000':
168 return signature
169 else:
170 return 'ffffffff'
171
172def validate_disk_signature(d):
173 import re
174
175 disk_signature = d.getVar("DISK_SIGNATURE", True)
176
177 if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
178 bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
179
180DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
181
182addtask bootdirectdisk before do_build
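A worked example of the layout arithmetic in build_boot_dd above (all numbers illustrative):

#   $HDDDIR holds 9000 KiB  ->  BLOCKS = 9000 + 16384 = 25384
#   rounded up to a multiple of 16    ->  BLOCKS = 25392
#   END1   = 25392 * 1024 = 26001408  (end of the FAT16 boot partition)
#   END2   = END1 + 512   = 26001920  (rootfs partition starts one sector later)
#   OFFSET = END2 / 512   = 50785     (sector at which ${ROOTFS} is dd'd in)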
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
new file mode 100644
index 0000000..b13eef9
--- /dev/null
+++ b/meta/classes/bootimg.bbclass
@@ -0,0 +1,240 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
2# Released under the MIT license (see packages/COPYING)
3
4# Creates a bootable image using syslinux, your kernel and an optional
5# initrd
6
7#
8# End result is two things:
9#
10# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
11# an initrd and a rootfs image. These can be written to harddisks directly and
12# also booted on USB flash disks (write them there with dd).
13#
14# 2. A CD .iso image
15
16# The boot process is that the initrd boots and processes the label selected
17# in syslinux; actions based on the label are then performed (e.g. installing
18# to a hard disk)
19
20# External variables (also used by syslinux.bbclass)
21# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
22# ${COMPRESSISO} - transparently compress the ISO, reducing its size by ~40%, if set to 1
23# ${NOISO} - skip building the ISO image if set to 1
24# ${NOHDD} - skip building the HDD image if set to 1
25# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
26
27do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
28 mtools-native:do_populate_sysroot \
29 cdrtools-native:do_populate_sysroot \
30 ${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}"
31
32PACKAGES = " "
33EXCLUDE_FROM_WORLD = "1"
34
35HDDDIR = "${S}/hddimg"
36ISODIR = "${S}/iso"
37EFIIMGDIR = "${S}/efi_img"
38COMPACT_ISODIR = "${S}/iso.z"
39COMPRESSISO ?= "0"
40
41BOOTIMG_VOLUME_ID ?= "boot"
42BOOTIMG_EXTRA_SPACE ?= "512"
43
44EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
45EFI_PROVIDER ?= "grub-efi"
46EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
47
48# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
49# contain "efi". This way legacy is supported by default if neither is
50# specified, maintaining the original behavior.
51def pcbios(d):
52 pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
53 if pcbios == "0":
54 pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
55 return pcbios
56
57PCBIOS = "${@pcbios(d)}"
58
59# syslinux is required for the isohybrid command and boot catalog
60inherit syslinux
61inherit ${EFI_CLASS}
62
63populate() {
64 DEST=$1
65 install -d ${DEST}
66
67 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
68 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
69
70 if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
71 install -m 0644 ${INITRD} ${DEST}/initrd
72 fi
73
74 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
75 install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
76 fi
77
78}
79
80build_iso() {
81 # Only create an ISO if we have an INITRD and NOISO was not set
82 if [ -z "${INITRD}" ] || [ ! -s "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
83 bbnote "ISO image will not be created."
84 return
85 fi
86
87 populate ${ISODIR}
88
89 if [ "${PCBIOS}" = "1" ]; then
90 syslinux_iso_populate ${ISODIR}
91 fi
92 if [ "${EFI}" = "1" ]; then
93 efi_iso_populate ${ISODIR}
94 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
95 fi
96
97 # EFI only
98 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
99 # Work around a bug in isohybrid where it requires isolinux.bin
100 # in the boot catalog, even though it is not used
101 mkdir -p ${ISODIR}/${ISOLINUXDIR}
102 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
103 fi
104
105 if [ "${COMPRESSISO}" = "1" ] ; then
106 # create compact directory, compress iso
107 mkdir -p ${COMPACT_ISODIR}
108 mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
109
110 # move compact iso to iso, then remove compact directory
111 mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
112 rm -Rf ${COMPACT_ISODIR}
113 mkisofs_compress_opts="-R -z -D -l"
114 else
115 mkisofs_compress_opts="-r"
116 fi
117
118 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
119 # PCBIOS only media
120 mkisofs -V ${BOOTIMG_VOLUME_ID} \
121 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
122 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
123 $mkisofs_compress_opts \
124 ${MKISOFS_OPTIONS} ${ISODIR}
125 else
126 # EFI only OR EFI+PCBIOS
127 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
128 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
129 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
130 $mkisofs_compress_opts ${MKISOFS_OPTIONS} \
131 -eltorito-alt-boot -eltorito-platform efi \
132 -b efi.img -no-emul-boot \
133 ${ISODIR}
134 isohybrid_args="-u"
135 fi
136
137 isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
138
139 cd ${DEPLOY_DIR_IMAGE}
140 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
141 ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
142}
143
144build_fat_img() {
145 FATSOURCEDIR=$1
146 FATIMG=$2
147
148 # Calculate the size required for the final image including the
149 # data and filesystem overhead.
150 # Sectors: 512 bytes
151 # Blocks: 1024 bytes
152
153 # Determine the sector count just for the data
154 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
155
156 # Account for the filesystem overhead. This includes directory
157 # entries in the clusters as well as the FAT itself.
158 # Assumptions:
159 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
160 # padding will be minimal on those smaller images and not
161 # worth the logic here to calculate the smaller FAT sizes)
162 # < 16 entries per directory
163 # 8.3 filenames only
164
165 # 32 bytes per dir entry
166 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
167 # 32 bytes for every end-of-directory dir entry
168 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
169 # 4 bytes per FAT entry per sector of data
170 FAT_BYTES=$(expr $SECTORS \* 4)
171 # 4 bytes per FAT entry per end-of-cluster list
172 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
173
174 # Use a ceiling function to determine FS overhead in sectors
175 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
176 # There are two FATs on the image
177 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
178 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
179
180 # Determine the final size in blocks accounting for some padding
181 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
182
183 # Ensure total sectors is an integral number of sectors per
184 # track or mcopy will complain. Sectors are 512 bytes, and we
185 # generate images with 32 sectors per track. This calculation is
186 # done in blocks, thus the mod by 16 instead of 32.
187 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
188
189 # mkdosfs will sometimes use FAT16 when it is not appropriate,
190 # resulting in a boot failure from SYSLINUX. Use FAT32 for
191 # images larger than 512MB, otherwise let mkdosfs decide.
192 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
193 FATSIZE="-F 32"
194 fi
195
196 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} ${BLOCKS}
197 # Copy FATSOURCEDIR recursively into the image file directly
198 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
199}
200
201build_hddimg() {
202 # Create an HDD image
203 if [ "${NOHDD}" != "1" ] ; then
204 populate ${HDDDIR}
205
206 if [ "${PCBIOS}" = "1" ]; then
207 syslinux_hddimg_populate ${HDDDIR}
208 fi
209 if [ "${EFI}" = "1" ]; then
210 efi_hddimg_populate ${HDDDIR}
211 fi
212
213 build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
214
215 if [ "${PCBIOS}" = "1" ]; then
216 syslinux_hddimg_install
217 fi
218
219 chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
220
221 cd ${DEPLOY_DIR_IMAGE}
222 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
223 ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
224 fi
225}
226
227python do_bootimg() {
228 if d.getVar("PCBIOS", True) == "1":
229 bb.build.exec_func('build_syslinux_cfg', d)
230 if d.getVar("EFI", True) == "1":
231 bb.build.exec_func('build_efi_cfg', d)
232 bb.build.exec_func('build_hddimg', d)
233 bb.build.exec_func('build_iso', d)
234}
235
236IMAGE_TYPEDEP_iso = "ext3"
237IMAGE_TYPEDEP_hddimg = "ext3"
238IMAGE_TYPES_MASKED += "iso hddimg"
239
240addtask bootimg before do_build
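A worked example of the FAT sizing in build_fat_img above (numbers illustrative):

#   du reports 40960 KiB of data  ->  SECTORS = 40960 * 2 = 81920
#   100 files + 10 dirs           ->  DIR_BYTES = 110*32 + 10*32 = 3840
#   FAT_BYTES   = 81920*4 + 10*4 = 327720
#   DIR_SECTORS = ceil(3840/512) = 8; FAT_SECTORS = ceil(327720/512)*2 = 1282
#   SECTORS     = 81920 + 8 + 1282 = 83210; BLOCKS = 83210/2 + 512 = 42117
#   rounded up to a multiple of 16 -> 42128 blocks (~41 MB), so FATSIZE stays
#   unset and mkdosfs picks the FAT width itself (FAT32 is only forced > 512MB)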
diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass
new file mode 100644
index 0000000..3fc8956
--- /dev/null
+++ b/meta/classes/bugzilla.bbclass
@@ -0,0 +1,187 @@
1#
2# Small event handler to automatically open URLs and file
3# bug reports at a Bugzilla of your choice.
4# It uses the XML-RPC interface, so you must have it enabled.
5#
6# Before using, you must define the BUGZILLA_USER and BUGZILLA_PASS credentials,
7# BUGZILLA_XMLRPC - the URI of xmlrpc.cgi,
8# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in the BTS for build bugs,
9# BUGZILLA_VERSION - the version against which to report new bugs
10#
11
12def bugzilla_find_bug_report(debug_file, server, args, bugname):
13 args['summary'] = bugname
14 bugs = server.Bug.search(args)
15 if len(bugs['bugs']) == 0:
16 print >> debug_file, "No bugs found"
17 return (False,None)
18 else: # silently pick the first result
19 print >> debug_file, "Result of bug search is "
20 print >> debug_file, bugs
21 status = bugs['bugs'][0]['status']
22 id = bugs['bugs'][0]['id']
23 return (status not in ["CLOSED", "RESOLVED", "VERIFIED"], id)
24
25def bugzilla_file_bug(debug_file, server, args, name, text, version):
26 args['summary'] = name
27 args['comment'] = text
28 args['version'] = version
29 args['op_sys'] = 'Linux'
30 args['platform'] = 'Other'
31 args['severity'] = 'normal'
32 args['priority'] = 'Normal'
33 try:
34 return server.Bug.create(args)['id']
35 except Exception, e:
36 print >> debug_file, repr(e)
37 return None
38
39def bugzilla_reopen_bug(debug_file, server, args, bug_number):
40 args['ids'] = [bug_number]
41 args['status'] = "CONFIRMED"
42 try:
43 server.Bug.update(args)
44 return True
45 except Exception, e:
46 print >> debug_file, repr(e)
47 return False
48
49def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
50 args['ids'] = [bug_number]
51 args['file_name'] = file_name
52 args['summary'] = logdescription
53 args['content_type'] = "text/plain"
54 args['data'] = log
55 args['comment'] = text
56 try:
57 server.Bug.add_attachment(args)
58 return True
59 except Exception, e:
60 print >> debug_file, repr(e)
61 return False
62
63def bugzilla_add_comment(debug_file, server, args, bug_number, text):
64 args['id'] = bug_number
65 args['comment'] = text
66 try:
67 server.Bug.add_comment(args)
68 return True
69 except Exception, e:
70 print >> debug_file, repr(e)
71 return False
72
73addhandler bugzilla_eventhandler
74bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed"
75python bugzilla_eventhandler() {
76 import glob
77 import xmlrpclib, httplib, base64
78
79 class ProxiedTransport(xmlrpclib.Transport):
80 def __init__(self, proxy, use_datetime = 0):
81 xmlrpclib.Transport.__init__(self, use_datetime)
82 self.proxy = proxy
83 self.user = None
84 self.password = None
85
86 def set_user(self, user):
87 self.user = user
88
89 def set_password(self, password):
90 self.password = password
91
92 def make_connection(self, host):
93 self.realhost = host
94 return httplib.HTTP(self.proxy)
95
96 def send_request(self, connection, handler, request_body):
97 connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
98 if self.user is not None:
99 if self.password is not None:
100 auth = "%s:%s" % (self.user, self.password)
101 else:
102 auth = self.user
103 connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
104
105 event = e
106 data = e.data
107 name = bb.event.getName(event)
108 if name == "MsgNote":
109 # avoid recursion
110 return
111
112 if name == "TaskFailed":
113 xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
114 user = data.getVar("BUGZILLA_USER", True)
115 passw = data.getVar("BUGZILLA_PASS", True)
116 product = data.getVar("BUGZILLA_PRODUCT", True)
117 compon = data.getVar("BUGZILLA_COMPONENT", True)
118 version = data.getVar("BUGZILLA_VERSION", True)
119
120 proxy = data.getVar('http_proxy', True )
121 if proxy:
122 import urllib2
123 s, u, p, hostport = urllib2._parse_proxy(proxy)
124 transport = ProxiedTransport(hostport)
125 else:
126 transport = None
127
128 server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
129 args = {
130 'Bugzilla_login': user,
131 'Bugzilla_password': passw,
132 'product': product,
133 'component': compon}
134
135 # evil hack to figure out what is going on
136 debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
137
138 file = None
139 bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
140 "pv" : data.getVar("PV", True),
141 }
142 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
143 text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
144 if len(log_file) != 0:
145 print >> debug_file, "Adding log file %s" % log_file[0]
146 file = open(log_file[0], 'r')
147 log = file.read()
148 file.close()
149 else:
150 print >> debug_file, "No log file found for the glob"
151 log = None
152
153 (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
154 print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
155
156 # File a new bug if none was found, or reopen it if it exists but was closed
157 if not bug_number:
158 bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
159 if not bug_number:
160 print >> debug_file, "Couldn't acquire a new bug_number, filing a bug report failed"
161 else:
162 print >> debug_file, "The new bug_number: '%s'" % bug_number
163 elif not bug_open:
164 if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
165 print >> debug_file, "Failed to reopen the bug #%s" % bug_number
166 else:
167 print >> debug_file, "Reopened the bug #%s" % bug_number
168
169 if bug_number and log:
170 print >> debug_file, "The bug is known as '%s'" % bug_number
171 desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
172 if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
173 print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
174 else:
175 print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
176 else:
177 print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
178 if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
179 print >> debug_file, "Failed to add a comment with the build log for bug #%s" % bug_number
180 else:
181 print >> debug_file, "Added a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
182
183 # store bug number for oestats-client
184 if bug_number:
185 data.setVar('OESTATS_BUG_NUMBER', bug_number)
186}
187
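An illustrative local.conf fragment for this handler (all values hypothetical):

INHERIT += "bugzilla"
BUGZILLA_XMLRPC = "http://bugzilla.example.com/xmlrpc.cgi"
BUGZILLA_USER = "autobuilder@example.com"
BUGZILLA_PASS = "secret"
BUGZILLA_PRODUCT = "Builds"
BUGZILLA_COMPONENT = "autobuilder"
BUGZILLA_VERSION = "1.0"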
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
new file mode 100644
index 0000000..262095f
--- /dev/null
+++ b/meta/classes/buildhistory.bbclass
@@ -0,0 +1,684 @@
1#
2# Records history of build output in order to detect regressions
3#
4# Based in part on testlab.bbclass and packagehistory.bbclass
5#
6# Copyright (C) 2011-2014 Intel Corporation
7# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
8#
9
10BUILDHISTORY_FEATURES ?= "image package sdk"
11BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
12BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
13BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
14BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}/${IMAGE_BASENAME}"
15BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
16BUILDHISTORY_COMMIT ?= "0"
17BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
18BUILDHISTORY_PUSH_REPO ?= ""
19
20SSTATEPOSTINSTFUNCS += "buildhistory_emit_pkghistory"
21# We want to avoid influencing the signatures of sstate tasks - first the function itself:
22sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
23# then the value added to SSTATEPOSTINSTFUNCS:
24SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
25
26#
27# Write out metadata about this package for comparison when writing future packages
28#
29python buildhistory_emit_pkghistory() {
30 if d.getVar('BB_CURRENTTASK', True) not in ['packagedata', 'packagedata_setscene']:
31 return 0
32
33 if "package" not in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
34 return 0
35
36 import re
37 import json
38 import errno
39
40 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
41
42 class RecipeInfo:
43 def __init__(self, name):
44 self.name = name
45 self.pe = "0"
46 self.pv = "0"
47 self.pr = "r0"
48 self.depends = ""
49 self.packages = ""
50 self.srcrev = ""
51
52
53 class PackageInfo:
54 def __init__(self, name):
55 self.name = name
56 self.pe = "0"
57 self.pv = "0"
58 self.pr = "r0"
59 # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them
60 self.pkg = ""
61 self.pkge = ""
62 self.pkgv = ""
63 self.pkgr = ""
64 self.size = 0
65 self.depends = ""
66 self.rprovides = ""
67 self.rdepends = ""
68 self.rrecommends = ""
69 self.rsuggests = ""
70 self.rreplaces = ""
71 self.rconflicts = ""
72 self.files = ""
73 self.filelist = ""
74 # Variables that need to be written to their own separate file
75 self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'])
76
77 # Should check PACKAGES here to see if anything was removed
78
79 def readPackageInfo(pkg, histfile):
80 pkginfo = PackageInfo(pkg)
81 with open(histfile, "r") as f:
82 for line in f:
83 lns = line.split('=', 1)
84 name = lns[0].strip()
85 value = lns[1].strip(" \t\r\n").strip('"')
86 if name == "PE":
87 pkginfo.pe = value
88 elif name == "PV":
89 pkginfo.pv = value
90 elif name == "PR":
91 pkginfo.pr = value
92 elif name == "PKG":
93 pkginfo.pkg = value
94 elif name == "PKGE":
95 pkginfo.pkge = value
96 elif name == "PKGV":
97 pkginfo.pkgv = value
98 elif name == "PKGR":
99 pkginfo.pkgr = value
100 elif name == "RPROVIDES":
101 pkginfo.rprovides = value
102 elif name == "RDEPENDS":
103 pkginfo.rdepends = value
104 elif name == "RRECOMMENDS":
105 pkginfo.rrecommends = value
106 elif name == "RSUGGESTS":
107 pkginfo.rsuggests = value
108 elif name == "RREPLACES":
109 pkginfo.rreplaces = value
110 elif name == "RCONFLICTS":
111 pkginfo.rconflicts = value
112 elif name == "PKGSIZE":
113 pkginfo.size = long(value)
114 elif name == "FILES":
115 pkginfo.files = value
116 elif name == "FILELIST":
117 pkginfo.filelist = value
118 # Apply defaults
119 if not pkginfo.pkg:
120 pkginfo.pkg = pkginfo.name
121 if not pkginfo.pkge:
122 pkginfo.pkge = pkginfo.pe
123 if not pkginfo.pkgv:
124 pkginfo.pkgv = pkginfo.pv
125 if not pkginfo.pkgr:
126 pkginfo.pkgr = pkginfo.pr
127 return pkginfo
128
129 def getlastpkgversion(pkg):
130 try:
131 histfile = os.path.join(pkghistdir, pkg, "latest")
132 return readPackageInfo(pkg, histfile)
133 except EnvironmentError:
134 return None
135
136 def sortpkglist(string):
137 pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0)
138 pkglist = [p.group(0) for p in pkgiter]
139 pkglist.sort()
140 return ' '.join(pkglist)
141
142 def sortlist(string):
143 items = string.split(' ')
144 items.sort()
145 return ' '.join(items)
146
147 pn = d.getVar('PN', True)
148 pe = d.getVar('PE', True) or "0"
149 pv = d.getVar('PV', True)
150 pr = d.getVar('PR', True)
151
152 pkgdata_dir = d.getVar('PKGDATA_DIR', True)
153 packages = ""
154 try:
155 with open(os.path.join(pkgdata_dir, pn)) as f:
156 for line in f.readlines():
157 if line.startswith('PACKAGES: '):
158 packages = squashspaces(line.split(': ', 1)[1])
159 break
160 except IOError as e:
161 if e.errno == errno.ENOENT:
162 # Probably a -cross recipe, just ignore
163 return 0
164 else:
165 raise
166
167 packagelist = packages.split()
168 if not os.path.exists(pkghistdir):
169 bb.utils.mkdirhier(pkghistdir)
170 else:
171 # Remove files for packages that no longer exist
172 for item in os.listdir(pkghistdir):
173 if item != "latest" and item != "latest_srcrev":
174 if item not in packagelist:
175 subdir = os.path.join(pkghistdir, item)
176 for subfile in os.listdir(subdir):
177 os.unlink(os.path.join(subdir, subfile))
178 os.rmdir(subdir)
179
180 rcpinfo = RecipeInfo(pn)
181 rcpinfo.pe = pe
182 rcpinfo.pv = pv
183 rcpinfo.pr = pr
184 rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
185 rcpinfo.packages = packages
186 write_recipehistory(rcpinfo, d)
187
188 pkgdest = d.getVar('PKGDEST', True)
189 for pkg in packagelist:
190 pkgdata = {}
191 with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
192 for line in f.readlines():
193 item = line.rstrip('\n').split(': ', 1)
194 key = item[0]
195 if key.endswith('_' + pkg):
196 key = key[:-len(pkg)-1]
197 pkgdata[key] = item[1].decode('utf-8').decode('string_escape')
198
199 pkge = pkgdata.get('PKGE', '0')
200 pkgv = pkgdata['PKGV']
201 pkgr = pkgdata['PKGR']
202 #
203 # Find out what the last version was
204 # Make sure the version did not decrease
205 #
206 lastversion = getlastpkgversion(pkg)
207 if lastversion:
208 last_pkge = lastversion.pkge
209 last_pkgv = lastversion.pkgv
210 last_pkgr = lastversion.pkgr
211 r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
212 if r < 0:
213 msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
214 package_qa_handle_error("version-going-backwards", msg, d)
215
216 pkginfo = PackageInfo(pkg)
217 # Apparently the version can be different on a per-package basis (see Python)
218 pkginfo.pe = pkgdata.get('PE', '0')
219 pkginfo.pv = pkgdata['PV']
220 pkginfo.pr = pkgdata['PR']
221 pkginfo.pkg = pkgdata['PKG']
222 pkginfo.pkge = pkge
223 pkginfo.pkgv = pkgv
224 pkginfo.pkgr = pkgr
225 pkginfo.rprovides = sortpkglist(squashspaces(pkgdata.get('RPROVIDES', "")))
226 pkginfo.rdepends = sortpkglist(squashspaces(pkgdata.get('RDEPENDS', "")))
227 pkginfo.rrecommends = sortpkglist(squashspaces(pkgdata.get('RRECOMMENDS', "")))
228 pkginfo.rsuggests = sortpkglist(squashspaces(pkgdata.get('RSUGGESTS', "")))
229 pkginfo.rreplaces = sortpkglist(squashspaces(pkgdata.get('RREPLACES', "")))
230 pkginfo.rconflicts = sortpkglist(squashspaces(pkgdata.get('RCONFLICTS', "")))
231 pkginfo.files = squashspaces(pkgdata.get('FILES', ""))
232 for filevar in pkginfo.filevars:
233 pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
234
235 # Gather information about packaged files
236 val = pkgdata.get('FILES_INFO', '')
237 dictval = json.loads(val)
238 filelist = dictval.keys()
239 filelist.sort()
240 pkginfo.filelist = " ".join(filelist)
241
242 pkginfo.size = int(pkgdata['PKGSIZE'])
243
244 write_pkghistory(pkginfo, d)
245}
246
247
248def write_recipehistory(rcpinfo, d):
249 import codecs
250
251 bb.debug(2, "Writing recipe history")
252
253 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
254
255 infofile = os.path.join(pkghistdir, "latest")
256 with codecs.open(infofile, "w", encoding='utf8') as f:
257 if rcpinfo.pe != "0":
258 f.write(u"PE = %s\n" % rcpinfo.pe)
259 f.write(u"PV = %s\n" % rcpinfo.pv)
260 f.write(u"PR = %s\n" % rcpinfo.pr)
261 f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
262 f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
263
264
265def write_pkghistory(pkginfo, d):
266 import codecs
267
268 bb.debug(2, "Writing package history for package %s" % pkginfo.name)
269
270 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
271
272 pkgpath = os.path.join(pkghistdir, pkginfo.name)
273 if not os.path.exists(pkgpath):
274 bb.utils.mkdirhier(pkgpath)
275
276 infofile = os.path.join(pkgpath, "latest")
277 with codecs.open(infofile, "w", encoding='utf8') as f:
278 if pkginfo.pe != "0":
279 f.write(u"PE = %s\n" % pkginfo.pe)
280 f.write(u"PV = %s\n" % pkginfo.pv)
281 f.write(u"PR = %s\n" % pkginfo.pr)
282
283 pkgvars = {}
284 pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else ''
285 pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else ''
286 pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else ''
287 pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else ''
288 for pkgvar in sorted(pkgvars):
289 val = pkgvars[pkgvar]
290 if val:
291 f.write(u"%s = %s\n" % (pkgvar, val))
292
293 f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides)
294 f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends)
295 f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends)
296 if pkginfo.rsuggests:
297 f.write(u"RSUGGESTS = %s\n" % pkginfo.rsuggests)
298 if pkginfo.rreplaces:
299 f.write(u"RREPLACES = %s\n" % pkginfo.rreplaces)
300 if pkginfo.rconflicts:
301 f.write(u"RCONFLICTS = %s\n" % pkginfo.rconflicts)
302 f.write(u"PKGSIZE = %d\n" % pkginfo.size)
303 f.write(u"FILES = %s\n" % pkginfo.files)
304 f.write(u"FILELIST = %s\n" % pkginfo.filelist)
305
306 for filevar in pkginfo.filevars:
307 filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
308 val = pkginfo.filevars[filevar]
309 if val:
310 with codecs.open(filevarpath, "w", encoding='utf8') as f:
311 f.write(val)
312 else:
313 if os.path.exists(filevarpath):
314 os.unlink(filevarpath)
315
316#
317# rootfs_type can be: image, sdk_target, sdk_host
318#
319def buildhistory_list_installed(d, rootfs_type="image"):
320 from oe.rootfs import image_list_installed_packages
321 from oe.sdk import sdk_list_installed_packages
322
323 process_list = [('file', 'bh_installed_pkgs.txt'),\
324 ('deps', 'bh_installed_pkgs_deps.txt')]
325
326 for output_type, output_file in process_list:
327 output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
328
329 with open(output_file_full, 'w') as output:
330 if rootfs_type == "image":
331 output.write(image_list_installed_packages(d, output_type))
332 else:
333 output.write(sdk_list_installed_packages(d, rootfs_type == "sdk_target", output_type))
334
335python buildhistory_list_installed_image() {
336 buildhistory_list_installed(d)
337}
338
339python buildhistory_list_installed_sdk_target() {
340 buildhistory_list_installed(d, "sdk_target")
341}
342
343python buildhistory_list_installed_sdk_host() {
344 buildhistory_list_installed(d, "sdk_host")
345}
346
347buildhistory_get_installed() {
348 mkdir -p $1
349
350 # Get list of installed packages
351 pkgcache="$1/installed-packages.tmp"
352 cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
353
354 cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
355 if [ -s $pkgcache ] ; then
356 cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
357 else
358 printf "" > $1/installed-packages.txt
359 fi
360
361 # Produce dependency graph
362 # First, quote each name to handle characters that cause issues for dot
363 cat ${WORKDIR}/bh_installed_pkgs_deps.txt | sed 's:\([^| ]*\):"\1":g' > $1/depends.tmp && \
364 rm ${WORKDIR}/bh_installed_pkgs_deps.txt
365 # Change delimiter from pipe to -> and set style for recommend lines
366 sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
367 # Add header, sorted and de-duped contents and footer and then delete the temp file
368 printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
369 cat $1/depends.tmp | sort | uniq >> $1/depends.dot
370 echo "}" >> $1/depends.dot
371 rm $1/depends.tmp
372
373 # Produce installed package sizes list
374 printf "" > $1/installed-package-sizes.tmp
375 cat $pkgcache | while read pkg pkgfile pkgarch
376 do
377 size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
378 if [ "$size" != "" ] ; then
379 echo "$size $pkg" >> $1/installed-package-sizes.tmp
380 fi
381 done
382 cat $1/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > $1/installed-package-sizes.txt
383 rm $1/installed-package-sizes.tmp
384
385 # We're now done with the cache, delete it
386 rm $pkgcache
387
388 if [ "$2" != "sdk" ] ; then
389 # Produce some cut-down graphs (for readability)
390 grep -v kernel_image $1/depends.dot | grep -v kernel-2 | grep -v kernel-3 > $1/depends-nokernel.dot
391 grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
392 grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
393 grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
394 fi
395
396 # add complementary package information
397 if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then
398 cp ${WORKDIR}/complementary_pkgs.txt $1
399 fi
400}
401
402buildhistory_get_image_installed() {
403 # Anything requiring the use of the packaging system should be done in here
404 # in case the packaging files are going to be removed for this image
405
406 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
407 return
408 fi
409
410 buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE}
411}
412
413buildhistory_get_sdk_installed() {
414 # Anything requiring the use of the packaging system should be done in here
415 # in case the packaging files are going to be removed for this SDK
416
417 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
418 return
419 fi
420
421 buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
422}
423
424buildhistory_get_sdk_installed_host() {
425 buildhistory_get_sdk_installed host
426}
427
428buildhistory_get_sdk_installed_target() {
429 buildhistory_get_sdk_installed target
430}
431
432buildhistory_list_files() {
433 # List the files in the specified directory, but exclude date/time etc.
434 # This find/sed pipeline is somewhat messy, but handles the case where the size is not printed for device files under pseudo
435 ( cd $1 && find . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
436}
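# An illustrative output line from the pipeline above (owner, size and path
# are made up); symlinks keep their " -> target" suffix, the sed only strips
# the empty suffix that find prints for non-links:
#   -rw-r--r-- root       root             1679 ./etc/passwd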
437
438
439buildhistory_get_imageinfo() {
440 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
441 return
442 fi
443
444 buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt
445
446 # Collect files requested in BUILDHISTORY_IMAGE_FILES
447 rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files
448 for f in ${BUILDHISTORY_IMAGE_FILES}; do
449 if [ -f ${IMAGE_ROOTFS}/$f ] ; then
450 mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f`
451 cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f
452 fi
453 done
454
455 # Record some machine-readable meta-information about the image
456 printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
457 cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
458${@buildhistory_get_imagevars(d)}
459END
460 imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
461 echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
462
463 # Add some configuration information
464 echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id
465
466 cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id <<END
467${@buildhistory_get_layers(d)}
468END
469}
470
471buildhistory_get_sdkinfo() {
472 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
473 return
474 fi
475
476 buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt
477
478 # Record some machine-readable meta-information about the SDK
479 printf "" > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
480 cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
481${@buildhistory_get_sdkvars(d)}
482END
483 sdksize=`du -ks ${SDK_OUTPUT} | awk '{ print $1 }'`
484 echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
485}
486
487# By prepending we get in before the removal of packaging files
488ROOTFS_POSTPROCESS_COMMAND =+ " buildhistory_list_installed_image ;\
489 buildhistory_get_image_installed ; "
490
491IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
492
493# We want these to be the last run so that we get called after complementary package installation
494POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target ;\
495 buildhistory_get_sdk_installed_target ; "
496POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;\
497 buildhistory_get_sdk_installed_host ; "
498
499SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; "
500
501def buildhistory_get_layers(d):
502 if d.getVar('BB_WORKERCONTEXT', True) != '1':
503 return ""
504 layertext = "Configured metadata layers:\n%s\n" % '\n'.join(get_layers_branch_rev(d))
505 return layertext
506
507def buildhistory_get_metadata_revs(d):
508 # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
509 layers = (d.getVar("BBLAYERS", True) or "").split()
510 metadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
511 base_get_metadata_git_branch(i, None).strip(), \
512 base_get_metadata_git_revision(i, None)) \
513 for i in layers]
514 return '\n'.join(metadata_revs)
515
516
517def squashspaces(string):
518 import re
519 return re.sub("\s+", " ", string).strip()
520
521def outputvars(vars, listvars, d):
522 vars = vars.split()
523 listvars = listvars.split()
524 ret = ""
525 for var in vars:
526 value = d.getVar(var, True) or ""
527 if var in listvars:
528 # Squash out spaces
529 value = squashspaces(value)
530 ret += "%s = %s\n" % (var, value)
531 return ret.rstrip('\n')
532
533def buildhistory_get_imagevars(d):
534 if d.getVar('BB_WORKERCONTEXT', True) != '1':
535 return ""
536 imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
537 listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
538 return outputvars(imagevars, listvars, d)
539
540def buildhistory_get_sdkvars(d):
541 if d.getVar('BB_WORKERCONTEXT', True) != '1':
542 return ""
543 sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
544 listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
545 return outputvars(sdkvars, listvars, d)
546
547
548def buildhistory_get_cmdline(d):
549 if sys.argv[0].endswith('bin/bitbake'):
550 bincmd = 'bitbake'
551 else:
552 bincmd = sys.argv[0]
553 return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
554
555
556buildhistory_commit() {
557 if [ ! -d ${BUILDHISTORY_DIR} ] ; then
558 # The code above that creates this dir was never executed, so there can't be anything to commit
559 return
560 fi
561
562 # Create a machine-readable list of metadata revisions for each layer
563 cat > ${BUILDHISTORY_DIR}/metadata-revs <<END
564${@buildhistory_get_metadata_revs(d)}
565END
566
567 ( cd ${BUILDHISTORY_DIR}/
568 # Initialise the repo if necessary
569 if [ ! -d .git ] ; then
570 git init -q
571 else
572 git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
573 git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
574 git tag -f build-minus-1 > /dev/null 2>&1 || true
575 fi
576 # Check if there are new/changed files to commit (other than metadata-revs)
577 repostatus=`git status --porcelain | grep -v " metadata-revs$"`
578 HOSTNAME=`hostname 2>/dev/null || echo unknown`
579 CMDLINE="${@buildhistory_get_cmdline(d)}"
580 if [ "$repostatus" != "" ] ; then
581 git add -A .
582 # porcelain output looks like "?? packages/foo/bar"
583 # Ensure we commit metadata-revs with the first commit
584 for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
585 git commit $entry metadata-revs -m "$entry: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
586 done
587 git gc --auto --quiet
588 if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
589 git push -q ${BUILDHISTORY_PUSH_REPO}
590 fi
591 else
592 git commit ${BUILDHISTORY_DIR}/ --allow-empty -m "No changes: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
593 fi) || true
594}
595
596python buildhistory_eventhandler() {
597 if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
598 if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
599 bb.note("Writing buildhistory")
600 bb.build.exec_func("buildhistory_commit", e.data)
601}
602
603addhandler buildhistory_eventhandler
604buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted"
605
606
607# FIXME this ought to be moved into the fetcher
608def _get_srcrev_values(d):
609 """
610 Return the version strings for the current recipe
611 """
612
613 scms = []
614 fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
615 urldata = fetcher.ud
616 for u in urldata:
617 if urldata[u].method.supports_srcrev():
618 scms.append(u)
619
620 autoinc_templ = 'AUTOINC+'
621 dict_srcrevs = {}
622 dict_tag_srcrevs = {}
623 for scm in scms:
624 ud = urldata[scm]
625 for name in ud.names:
626 try:
627 rev = ud.method.sortable_revision(ud, d, name)
628 except TypeError:
629 # support old bitbake versions
630 rev = ud.method.sortable_revision(scm, ud, d, name)
631 # Clean this up when we next bump bitbake version
632 if type(rev) != str:
633 autoinc, rev = rev
634 elif rev.startswith(autoinc_templ):
635 rev = rev[len(autoinc_templ):]
636 dict_srcrevs[name] = rev
637 if 'tag' in ud.parm:
638 tag = ud.parm['tag']
639 key = name+'_'+tag
640 dict_tag_srcrevs[key] = rev
641 return (dict_srcrevs, dict_tag_srcrevs)
642
643do_fetch[postfuncs] += "write_srcrev"
644do_fetch[vardepsexclude] += "write_srcrev"
645python write_srcrev() {
646 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
647 srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
648
649 srcrevs, tag_srcrevs = _get_srcrev_values(d)
650 if srcrevs:
651 if not os.path.exists(pkghistdir):
652 bb.utils.mkdirhier(pkghistdir)
653 old_tag_srcrevs = {}
654 if os.path.exists(srcrevfile):
655 with open(srcrevfile) as f:
656 for line in f:
657 if line.startswith('# tag_'):
658 key, value = line.split("=", 1)
659 key = key.replace('# tag_', '').strip()
660 value = value.replace('"', '').strip()
661 old_tag_srcrevs[key] = value
662 with open(srcrevfile, 'w') as f:
663 orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
664 if orig_srcrev != 'INVALID':
665 f.write('# SRCREV = "%s"\n' % orig_srcrev)
666 if len(srcrevs) > 1:
667 for name, srcrev in srcrevs.items():
668 orig_srcrev = d.getVar('SRCREV_%s' % name, False)
669 if orig_srcrev:
670 f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
671 f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
672 else:
673 f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next())
674 if len(tag_srcrevs) > 0:
675 for name, srcrev in tag_srcrevs.items():
676 f.write('# tag_%s = "%s"\n' % (name, srcrev))
677 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
678 pkg = d.getVar('PN', True)
679 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
680
681 else:
682 if os.path.exists(srcrevfile):
683 os.remove(srcrevfile)
684}
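A sketch of how this class is typically enabled, together with an illustrative packages/&lt;arch&gt;/&lt;recipe&gt;/&lt;pkg&gt;/latest file as written above (contents hypothetical):

INHERIT += "buildhistory"
BUILDHISTORY_COMMIT = "1"

#   PV = 1.2.3
#   PR = r0
#   RDEPENDS = libc6 (>= 2.19)
#   PKGSIZE = 49152
#   FILELIST = /usr/bin/foo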
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
new file mode 100644
index 0000000..89ae72c
--- /dev/null
+++ b/meta/classes/buildstats.bbclass
@@ -0,0 +1,289 @@
1BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
2BNFILE = "${BUILDSTATS_BASE}/.buildname"
3DEVFILE = "${BUILDSTATS_BASE}/.device"
4
5################################################################################
6# Build statistics gathering.
7#
8# The CPU and Time gathering/tracking functions and bbevent inspiration
9# were written by Christopher Larson and can be seen here:
10# http://kergoth.pastey.net/142813
11#
12################################################################################
13
14def get_process_cputime(pid):
15 with open("/proc/%d/stat" % pid, "r") as f:
16 fields = f.readline().rstrip().split()
17 # 13: utime, 14: stime, 15: cutime, 16: cstime
18 return sum(int(field) for field in fields[13:17])
19
20def get_cputime():
21 with open("/proc/stat", "r") as f:
22 fields = f.readline().rstrip().split()[1:]
23 return sum(int(field) for field in fields)
24
25def set_bn(e):
26 bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
27 try:
28 os.remove(e.data.getVar('BNFILE', True))
29 except:
30 pass
31 with open(e.data.getVar('BNFILE', True), "w") as f:
32 f.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
33
34def get_bn(e):
35 with open(e.data.getVar('BNFILE', True)) as f:
36 bn = f.readline()
37 return bn
38
39def set_device(e):
40 tmpdir = e.data.getVar('TMPDIR', True)
41 try:
42 os.remove(e.data.getVar('DEVFILE', True))
43 except:
44 pass
45 ############################################################################
46 # We look for the volume TMPDIR lives on. To do all disks would make little
47 # sense and not give us any particularly useful data. In theory we could do
48 # something like stick DL_DIR on a different partition and this would
49 # throw stats gathering off. The same goes for SSTATE_DIR. However, let's
50 # get the basics in here and work on the corner cases later.
51 # Note: /proc/diskstats does not contain info on ecryptfs, tmpfs, etc.
52 # If we end up hitting one of these fs, we'll just skip diskstats collection.
53 ############################################################################
54 device=os.stat(tmpdir)
55 majordev=os.major(device.st_dev)
56 minordev=os.minor(device.st_dev)
57 ############################################################################
58 # Bug 1700:
59 # Because tmpfs/ecryptfs/ramfs etc. insert no entry in /proc/diskstats
60 # we set rdev to NoLogicalDevice and search for it later. If we find NLD
61 # we do not collect diskstats as the method to collect meaningful statistics
62 # for these fs types requires a bit more research.
63 ############################################################################
64 rdev="NoLogicalDevice"
65 try:
66 with open("/proc/diskstats", "r") as f:
67 for line in f:
68 if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
69 rdev=line.split()[2]
70 except:
71 pass
72 file = open(e.data.getVar('DEVFILE', True), "w")
73 file.write(rdev)
74 file.close()
75
76def get_device(e):
77 file = open(e.data.getVar('DEVFILE', True))
78 device = file.readline()
79 file.close()
80 return device
81
82def get_diskstats(dev):
83 import itertools
84 ############################################################################
85 # For info on what these are, see kernel doc file iostats.txt
86 ############################################################################
87 DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'WritesMerged', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
88 try:
89 with open("/proc/diskstats", "r") as f:
90 for x in f:
91 if dev in x:
92 diskstats_val = x.rstrip().split()[3:]
93 except IOError as e:
94 return
95 diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val))
96 return diskstats
97
98def set_diskdata(var, dev, data):
99 data.setVar(var, get_diskstats(dev))
100
101def get_diskdata(var, dev, data):
102 olddiskdata = data.getVar(var, False)
103 diskdata = {}
104 if olddiskdata is None:
105 return
106 newdiskdata = get_diskstats(dev)
107 for key in olddiskdata.iterkeys():
108 diskdata["Start"+key] = str(int(olddiskdata[key]))
109 diskdata["End"+key] = str(int(newdiskdata[key]))
110 return diskdata
111
112def set_timedata(var, data, server_time=None):
113 import time
114 if server_time:
115 t = server_time
116 else:
117 t = time.time()
118 cputime = get_cputime()
119 proctime = get_process_cputime(os.getpid())
120 data.setVar(var, (t, cputime, proctime))
121
122def get_timedata(var, data, server_time=None):
123 import time
124 timedata = data.getVar(var, False)
125 if timedata is None:
126 return
127 oldtime, oldcpu, oldproc = timedata
128 procdiff = get_process_cputime(os.getpid()) - oldproc
129 cpudiff = get_cputime() - oldcpu
130 if server_time:
131 end_time = server_time
132 else:
133 end_time = time.time()
134 timediff = end_time - oldtime
135 if cpudiff > 0:
136 cpuperc = float(procdiff) * 100 / cpudiff
137 else:
138 cpuperc = None
139 return timediff, cpuperc
140
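# Worked example for get_timedata above (illustrative numbers): if the task's
# process tree accumulated procdiff = 40 jiffies while the whole system
# accumulated cpudiff = 400 jiffies, the reported usage is 40*100/400 = 10.0%.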
141def write_task_data(status, logfile, dev, e):
142 bn = get_bn(e)
143 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
144 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
145 file = open(logfile, "a")
146 timedata = get_timedata("__timedata_task", e.data, e.time)
147 if timedata:
148 elapsedtime, cpu = timedata
149 file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
150 (e.task, elapsedtime), e.data))
151 if cpu:
152 file.write("CPU usage: %0.1f%% \n" % cpu)
153 ############################################################################
154 # Here we gather up disk data. In an effort to avoid lying with stats
155 # we do a bare minimum of analysis of the collected data.
156 # The simple fact is, doing disk I/O collection on a per-process basis
157 # without affecting build time would be difficult.
158 # For the best information, running things with BB_TOTAL_THREADS = "1"
159 # would return accurate per task results.
160 ############################################################################
161 if dev != "NoLogicalDevice":
162 diskdata = get_diskdata("__diskdata_task", dev, e.data)
163 if diskdata:
164 for key in sorted(diskdata.iterkeys()):
165 file.write(key + ": " + diskdata[key] + "\n")
166 if status == "passed":
167 file.write("Status: PASSED \n")
168 else:
169 file.write("Status: FAILED \n")
170 file.write("Ended: %0.2f \n" % e.time)
171 file.close()
172
173python run_buildstats () {
174 import bb.build
175 import bb.event
176 import bb.data
177 import time, subprocess, platform
178
179 if isinstance(e, bb.event.BuildStarted):
180 ########################################################################
181        # on the first pass, make the buildstats hierarchy and then
182        # set the build name
183 ########################################################################
184 try:
185 bb.utils.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
186 except:
187 pass
188 set_bn(e)
189 bn = get_bn(e)
190 set_device(e)
191 device = get_device(e)
192
193 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
194 try:
195 bb.utils.mkdirhier(bsdir)
196 except:
197 pass
198 if device != "NoLogicalDevice":
199 set_diskdata("__diskdata_build", device, e.data)
200 set_timedata("__timedata_build", e.data)
201 build_time = os.path.join(bsdir, "build_stats")
202 # write start of build into build_time
203 file = open(build_time,"a")
204 host_info = platform.uname()
205 file.write("Host Info: ")
206 for x in host_info:
207 if x:
208 file.write(x + " ")
209 file.write("\n")
210 file.write("Build Started: %0.2f \n" % time.time())
211 file.close()
212
213 elif isinstance(e, bb.event.BuildCompleted):
214 bn = get_bn(e)
215 device = get_device(e)
216 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
217 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
218 build_time = os.path.join(bsdir, "build_stats")
219 file = open(build_time, "a")
220 ########################################################################
221 # Write build statistics for the build
222 ########################################################################
223 timedata = get_timedata("__timedata_build", e.data)
224 if timedata:
225            elapsed, cpu = timedata   # renamed so the time module isn't shadowed
226            # write end of build and cpu used into build_time
227            file.write("Elapsed time: %0.2f seconds \n" % (elapsed))
228 if cpu:
229 file.write("CPU usage: %0.1f%% \n" % cpu)
230 if device != "NoLogicalDevice":
231 diskio = get_diskdata("__diskdata_build", device, e.data)
232 if diskio:
233 for key in sorted(diskio.iterkeys()):
234 file.write(key + ": " + diskio[key] + "\n")
235 file.close()
236
237 if isinstance(e, bb.build.TaskStarted):
238 bn = get_bn(e)
239 device = get_device(e)
240 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
241 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
242 if device != "NoLogicalDevice":
243 set_diskdata("__diskdata_task", device, e.data)
244 set_timedata("__timedata_task", e.data, e.time)
245 try:
246 bb.utils.mkdirhier(taskdir)
247 except:
248 pass
249 # write into the task event file the name and start time
250 file = open(os.path.join(taskdir, e.task), "a")
251 file.write("Event: %s \n" % bb.event.getName(e))
252 file.write("Started: %0.2f \n" % e.time)
253 file.close()
254
255 elif isinstance(e, bb.build.TaskSucceeded):
256 bn = get_bn(e)
257 device = get_device(e)
258 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
259 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
260 write_task_data("passed", os.path.join(taskdir, e.task), device, e)
261 if e.task == "do_rootfs":
262 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
263 bs=os.path.join(bsdir, "build_stats")
264 file = open(bs,"a")
265 rootfs = e.data.getVar('IMAGE_ROOTFS', True)
266 rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
267 file.write("Uncompressed Rootfs size: %s" % rootfs_size)
268 file.close()
269
270 elif isinstance(e, bb.build.TaskFailed):
271 bn = get_bn(e)
272 device = get_device(e)
273 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
274 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
275 write_task_data("failed", os.path.join(taskdir, e.task), device, e)
276 ########################################################################
277        # Let's make things easier and record where the build failed in
278        # build_status. We do this here because BuildCompleted fires no
279        # matter what the final status of the build actually is.
280 ########################################################################
281 build_status = os.path.join(bsdir, "build_stats")
282 file = open(build_status,"a")
283 file.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task))
284 file.close()
285}
286
287addhandler run_buildstats
288run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
289
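The class above is switched on from configuration rather than per recipe. A minimal local.conf sketch, assuming BUILDSTATS_BASE is defined with the class defaults (the handlers above read it via getVar('BUILDSTATS_BASE')):

    USER_CLASSES += "buildstats"
    # Optional override of where the statistics are written
    BUILDSTATS_BASE = "${TMPDIR}/buildstats/"

Results land under ${BUILDSTATS_BASE}/<buildname>/: build_stats carries the host info, elapsed time, CPU percentage and disk counters written by run_buildstats, and each <PF>/<task> file carries the per-task records appended by write_task_data.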
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
new file mode 100644
index 0000000..2cdce46
--- /dev/null
+++ b/meta/classes/ccache.bbclass
@@ -0,0 +1,8 @@
1CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
2export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
3CCACHE_DISABLE[unexport] = "1"
4
5do_configure[dirs] =+ "${CCACHE_DIR}"
6do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
7
8do_clean[cleandirs] += "${CCACHE_DIR}"
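The CCACHE assignment above leans on Python's short-circuit "and": bb.utils.which() returns the resolved path or an empty string, so the expression expands to "ccache " only when the tool is actually on PATH, and to "" otherwise. A minimal sketch of enabling it globally from local.conf, assuming the stock OE-Core toolchain definitions that prepend ${CCACHE} to the compiler commands:

    INHERIT += "ccache"

With that in place every recipe gets its own cache directory under ${TMPDIR}/ccache, scoped per machine and per package by the CCACHE_DIR export above, and do_clean wipes it along with the work files.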
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
new file mode 100644
index 0000000..7bdb1b9
--- /dev/null
+++ b/meta/classes/chrpath.bbclass
@@ -0,0 +1,115 @@
1CHRPATH_BIN ?= "chrpath"
2PREPROCESS_RELOCATE_DIRS ?= ""
3
4def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
5 import subprocess as sub
6
7 p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
8    out, err = p.communicate()
9    # If the command returned successfully, process its stdout for results
10    if p.returncode != 0:
11        return
12
13    # Throw away everything other than the rpath list
14    curr_rpath = out.partition("RPATH=")[2]
15 #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
16 rpaths = curr_rpath.split(":")
17 new_rpaths = []
18 modified = False
19 for rpath in rpaths:
20 # If rpath is already dynamic copy it to new_rpath and continue
21 if rpath.find("$ORIGIN") != -1:
22 new_rpaths.append(rpath.strip())
23 continue
24 rpath = os.path.normpath(rpath)
25 if baseprefix not in rpath and tmpdir not in rpath:
26 new_rpaths.append(rpath.strip())
27 continue
28 new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/"))))
29 modified = True
30
31 # if we have modified some rpaths call chrpath to update the binary
32 if modified:
33 args = ":".join(new_rpaths)
34 #bb.note("Setting rpath for %s to %s" %(fpath, args))
35 p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
36 out, err = p.communicate()
37 if p.returncode != 0:
38 bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
39 raise bb.build.FuncFailed
40
41def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
42 import subprocess as sub
43
44 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
45    out, err = p.communicate()
46    # If the command returned successfully, process its stdout for results
47    if p.returncode != 0:
48        return
49    for l in out.split("\n"):
50 if "(compatibility" not in l:
51 continue
52 rpath = l.partition("(compatibility")[0].strip()
53 if baseprefix not in rpath:
54 continue
55
56 newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
57 bb.warn("%s %s %s %s" % (fpath, rpath, newpath, rootdir))
58 p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
59        out, err = p.communicate()
60
61def process_dir (rootdir, directory, d):
62 import stat
63
64 cmd = d.expand('${CHRPATH_BIN}')
65 tmpdir = os.path.normpath(d.getVar('TMPDIR'))
66 baseprefix = os.path.normpath(d.expand('${base_prefix}'))
67 hostos = d.getVar("HOST_OS", True)
68
69 #bb.debug("Checking %s for binaries to process" % directory)
70 if not os.path.exists(directory):
71 return
72
73 if "linux" in hostos:
74 process_file = process_file_linux
75 elif "darwin" in hostos:
76 process_file = process_file_darwin
77 else:
78 # Relocations not supported
79 return
80
81 dirs = os.listdir(directory)
82 for file in dirs:
83 fpath = directory + "/" + file
84 fpath = os.path.normpath(fpath)
85 if os.path.islink(fpath):
86 # Skip symlinks
87 continue
88
89 if os.path.isdir(fpath):
90 process_dir(rootdir, fpath, d)
91 else:
92 #bb.note("Testing %s for relocatability" % fpath)
93
94 # We need read and write permissions for chrpath, if we don't have
95 # them then set them temporarily. Take a copy of the files
96 # permissions so that we can restore them afterwards.
97 perms = os.stat(fpath)[stat.ST_MODE]
98 if os.access(fpath, os.W_OK|os.R_OK):
99 perms = None
100 else:
101 # Temporarily make the file writeable so we can chrpath it
102 os.chmod(fpath, perms|stat.S_IRWXU)
103 process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
104
105 if perms:
106 os.chmod(fpath, perms)
107
108def rpath_replace (path, d):
109 bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
110
111 for bindir in bindirs:
112 #bb.note ("Processing directory " + bindir)
113 directory = path + "/" + bindir
114 process_dir (path, directory, d)
115
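The core of process_file_linux is the rewrite from an absolute build-tree rpath to one anchored at the binary itself. A standalone sketch of that computation with hypothetical paths (only the relpath/$ORIGIN idiom is taken from the class above):

    import os

    # Hypothetical: a binary at <root>/usr/bin/foo with an rpath into <root>/usr/lib
    fpath = "/usr/bin/foo"
    rpath = "/usr/lib"
    print("$ORIGIN/" + os.path.relpath(rpath, os.path.dirname(fpath)))
    # -> $ORIGIN/../lib

The dynamic linker substitutes $ORIGIN with the directory containing the binary at load time, so the rewritten rpath keeps working wherever the installed tree is relocated.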
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
new file mode 100644
index 0000000..167407d
--- /dev/null
+++ b/meta/classes/clutter.bbclass
@@ -0,0 +1,22 @@
1
2def get_minor_dir(v):
3 import re
4 m = re.match("^([0-9]+)\.([0-9]+)", v)
5 return "%s.%s" % (m.group(1), m.group(2))
6
7def get_real_name(n):
8 import re
9 m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
10 return "%s" % (m.group(1))
11
12VERMINOR = "${@get_minor_dir("${PV}")}"
13REALNAME = "${@get_real_name("${BPN}")}"
14
15CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
16
17CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}"
18
19SRC_URI = "${CLUTTER_SRC_FTP}"
20S = "${WORKDIR}/${REALNAME}-${PV}"
21
22inherit autotools pkgconfig gtk-doc gettext
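The two regex helpers recover the upstream tarball layout from BitBake's package naming. A quick sketch of their behaviour on representative inputs (the package names are illustrative):

    import re

    def get_minor_dir(v):
        m = re.match("^([0-9]+)\.([0-9]+)", v)
        return "%s.%s" % (m.group(1), m.group(2))

    def get_real_name(n):
        m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
        return "%s" % (m.group(1))

    assert get_minor_dir("1.8.2") == "1.8"
    assert get_real_name("clutter-gst-1.0") == "clutter-gst"

So a recipe with BPN clutter-gst-1.0 and PV 1.8.2 would fetch ${GNOME_MIRROR}/clutter-gst/1.8/clutter-gst-1.8.2.tar.xz, matching the major.minor directory layout of the GNOME mirrors.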
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
new file mode 100644
index 0000000..c9c15f3
--- /dev/null
+++ b/meta/classes/cmake.bbclass
@@ -0,0 +1,115 @@
1DEPENDS_prepend = "cmake-native "
2B = "${WORKDIR}/build"
3
4# We need to unset CCACHE otherwise cmake gets too confused
5CCACHE = ""
6
7# We want the staging and installing functions from autotools
8inherit autotools
9
10# C/C++ Compiler (without cpu arch/tune arguments)
11OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
12OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
13
14# Compiler flags
15OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
16OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} -fpermissive"
17OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG"
18OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
19OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
20OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
21
22OECMAKE_RPATH ?= ""
23OECMAKE_PERLNATIVE_DIR ??= ""
24OECMAKE_EXTRA_ROOT_PATH ?= ""
25
26cmake_do_generate_toolchain_file() {
27 cat > ${WORKDIR}/toolchain.cmake <<EOF
28# CMake system name must be something like "Linux".
29# This is important for cross-compiling.
30set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
31set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
32set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
33set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
34set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
35set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
36set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
37set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
38set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
39set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
40
41# Only search in the paths provided so cmake doesn't pick
42# up libraries and tools from the native build machine
43set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
44set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
45set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
46set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
47set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
48
49# Use qt.conf settings
50set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
51
52# We need to set the rpath to the correct directory, as cmake does not
53# provide any rpath directory by default
54set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
55
56# Use native cmake modules
57set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
58
59# add for non /usr/lib libdir, e.g. /usr/lib64
60set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
61
62EOF
63}
64
65addtask generate_toolchain_file after do_patch before do_configure
66
67cmake_do_configure() {
68 if [ "${OECMAKE_BUILDPATH}" -o "${OECMAKE_SOURCEPATH}" ]; then
69 bbnote "cmake.bbclass no longer uses OECMAKE_SOURCEPATH and OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
70 fi
71
72 if [ "${S}" != "${B}" ]; then
73 rm -rf ${B}
74 mkdir -p ${B}
75 cd ${B}
76 fi
77
78    # Just like autotools, cmake can use a site file to cache results that would otherwise need generated binaries to run
79 if [ -e ${WORKDIR}/site-file.cmake ] ; then
80 OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
81 else
82 OECMAKE_SITEFILE=""
83 fi
84
85 cmake \
86 ${OECMAKE_SITEFILE} \
87 ${S} \
88 -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
89 -DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
90 -DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
91 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${libexecdir} \
92 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
93 -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${sharedstatedir} \
94 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
95 -DCMAKE_INSTALL_LIBDIR:PATH=${libdir} \
96 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${includedir} \
97 -DCMAKE_INSTALL_DATAROOTDIR:PATH=${datadir} \
98 -DCMAKE_INSTALL_SO_NO_EXE=0 \
99 -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
100 -DCMAKE_VERBOSE_MAKEFILE=1 \
101 ${EXTRA_OECMAKE} \
102 -Wno-dev
103}
104
105cmake_do_compile() {
106 cd ${B}
107 base_do_compile
108}
109
110cmake_do_install() {
111 cd ${B}
112 autotools_do_install
113}
114
115EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
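A recipe picks all of this up by inheriting the class and routing project-specific switches through EXTRA_OECMAKE, which is appended near the end of the cmake invocation in cmake_do_configure. A minimal hypothetical recipe fragment (the option name is illustrative):

    inherit cmake

    # Appended verbatim to the cmake command line above
    EXTRA_OECMAKE = "-DENABLE_TOOLS=ON"

Since B defaults to ${WORKDIR}/build, configuration always happens out of tree, and the generated ${WORKDIR}/toolchain.cmake pins the cross compilers and CMAKE_FIND_ROOT_PATH so find_package() cannot leak libraries from the build host.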
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
new file mode 100644
index 0000000..34c0c4e
--- /dev/null
+++ b/meta/classes/cml1.bbclass
@@ -0,0 +1,73 @@
1cml1_do_configure() {
2 set -e
3 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
4 oe_runmake oldconfig
5}
6
7EXPORT_FUNCTIONS do_configure
8addtask configure after do_unpack do_patch before do_compile
9
10inherit terminal
11
12OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS HOST_LOADLIBES TERMINFO"
13HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
14HOSTLDFLAGS = "${BUILD_LDFLAGS}"
15HOST_LOADLIBES = "-lncurses"
16TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
17
18python do_menuconfig() {
19 import shutil
20
21 try:
22 mtime = os.path.getmtime(".config")
23 shutil.copy(".config", ".config.orig")
24 except OSError:
25 mtime = 0
26
27 oe_terminal("${SHELL} -c \"make menuconfig; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"", '${PN} Configuration', d)
28
29 # FIXME this check can be removed when the minimum bitbake version has been bumped
30 if hasattr(bb.build, 'write_taint'):
31 try:
32 newmtime = os.path.getmtime(".config")
33 except OSError:
34 newmtime = 0
35
36 if newmtime > mtime:
37 bb.note("Configuration changed, recompile will be forced")
38 bb.build.write_taint('do_compile', d)
39}
40do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
41do_menuconfig[nostamp] = "1"
42addtask menuconfig after do_configure
43
44python do_diffconfig() {
45 import shutil
46 import subprocess
47
48 workdir = d.getVar('WORKDIR', True)
49 fragment = workdir + '/fragment.cfg'
50 configorig = '.config.orig'
51 config = '.config'
52
53 try:
54 md5newconfig = bb.utils.md5_file(configorig)
55 md5config = bb.utils.md5_file(config)
56 isdiff = md5newconfig != md5config
57 except IOError as e:
58 bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
59
60 if isdiff:
61 statement = 'diff -Nurp ' + configorig + ' ' + config + '| sed -n "s/^\+//p" >' + fragment
62 subprocess.call(statement, shell=True)
63
64 shutil.copy(configorig, config)
65
66 bb.plain("Config fragment has been dumped into:\n %s" % fragment)
67 else:
68 if os.path.exists(fragment):
69 os.unlink(fragment)
70}
71
72do_diffconfig[nostamp] = "1"
73addtask diffconfig
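The two added tasks are driven from the bitbake command line. A typical sequence against a kernel recipe (the target name is just an example; any cml1-based recipe works):

    $ bitbake -c menuconfig virtual/kernel
    $ bitbake -c diffconfig virtual/kernel

do_menuconfig taints do_compile if .config changed, forcing a rebuild on the next run; do_diffconfig diffs the saved .config.orig against the edited .config, keeps only the added lines as ${WORKDIR}/fragment.cfg, then restores .config so later diffs start from the same baseline.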
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
new file mode 100644
index 0000000..907c183
--- /dev/null
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -0,0 +1,64 @@
1# Deploy sources for recipes for compliance with copyleft-style licenses
2# Defaults to using symlinks, as it's a quick operation, and one can easily
3# follow the links when making use of the files (e.g. tar with the -h arg).
4#
5# vi:sts=4:sw=4:et
6
7inherit copyleft_filter
8
9COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
10
11python do_prepare_copyleft_sources () {
12 """Populate a tree of the recipe sources and emit patch series files"""
13 import os.path
14 import shutil
15
16 p = d.getVar('P', True)
17 included, reason = copyleft_should_include(d)
18 if not included:
19 bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
20 return
21 else:
22 bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
23
24 sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
25 dl_dir = d.getVar('DL_DIR', True)
26 src_uri = d.getVar('SRC_URI', True).split()
27 fetch = bb.fetch2.Fetch(src_uri, d)
28 ud = fetch.ud
29
30 pf = d.getVar('PF', True)
31 dest = os.path.join(sources_dir, pf)
32 shutil.rmtree(dest, ignore_errors=True)
33 bb.utils.mkdirhier(dest)
34
35 for u in ud.values():
36 local = os.path.normpath(fetch.localpath(u.url))
37 if local.endswith('.bb'):
38 continue
39 elif local.endswith('/'):
40 local = local[:-1]
41
42 if u.mirrortarball:
43 tarball_path = os.path.join(dl_dir, u.mirrortarball)
44 if os.path.exists(tarball_path):
45 local = tarball_path
46
47 oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
48
49 patches = src_patches(d)
50 for patch in patches:
51 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
52 patchdir = parm.get('patchdir')
53 if patchdir:
54 series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
55 else:
56 series = os.path.join(dest, 'series')
57
58 with open(series, 'a') as s:
59 s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
60}
61
62addtask prepare_copyleft_sources after do_fetch before do_build
63do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
64do_build[recrdeptask] += 'do_prepare_copyleft_sources'
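Like buildstats, this class is enabled from configuration. A minimal local.conf sketch using the defaults declared above:

    INHERIT += "copyleft_compliance"
    # Optional: move the deployed sources (the class default is shown)
    COPYLEFT_SOURCES_DIR = "${DEPLOY_DIR}/copyleft_sources"

Each recipe that passes the copyleft_filter check then gets a ${PF} directory under COPYLEFT_SOURCES_DIR holding symlinks to its fetched sources plus quilt-style series files listing its patches and strip levels.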
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
new file mode 100644
index 0000000..2c1d8f1
--- /dev/null
+++ b/meta/classes/copyleft_filter.bbclass
@@ -0,0 +1,62 @@
1# Filter on license: copyleft_should_include returns True for recipes whose
2# license matches COPYLEFT_LICENSE_INCLUDE, and False for those matching
3# COPYLEFT_LICENSE_EXCLUDE.
4#
5# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
6#
7# vi:sts=4:sw=4:et
8
9COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
10COPYLEFT_LICENSE_INCLUDE[type] = 'list'
11COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
12
13COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
14COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
15COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
16
17COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
18COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
19
20COPYLEFT_RECIPE_TYPES ?= 'target'
21COPYLEFT_RECIPE_TYPES[type] = 'list'
22COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
23
24COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
25COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
26COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
27
28def copyleft_recipe_type(d):
29 for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
30 if oe.utils.inherits(d, recipe_type):
31 return recipe_type
32 return 'target'
33
34def copyleft_should_include(d):
35 """
36 Determine if this recipe's sources should be deployed for compliance
37 """
38 import ast
39 import oe.license
40 from fnmatch import fnmatchcase as fnmatch
41
42 recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
43 if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
44 return False, 'recipe type "%s" is excluded' % recipe_type
45