Diffstat (limited to 'meta/classes')
-rw-r--r-- meta/classes/allarch.bbclass | 36
-rw-r--r-- meta/classes/archiver.bbclass | 368
-rw-r--r-- meta/classes/autotools-brokensep.bbclass | 5
-rw-r--r-- meta/classes/autotools.bbclass | 271
-rw-r--r-- meta/classes/autotools_stage.bbclass | 2
-rw-r--r-- meta/classes/base.bbclass | 661
-rw-r--r-- meta/classes/bin_package.bbclass | 36
-rw-r--r-- meta/classes/binconfig.bbclass | 63
-rw-r--r-- meta/classes/blacklist.bbclass | 45
-rw-r--r-- meta/classes/boot-directdisk.bbclass | 182
-rw-r--r-- meta/classes/bootimg.bbclass | 240
-rw-r--r-- meta/classes/bugzilla.bbclass | 187
-rw-r--r-- meta/classes/buildhistory.bbclass | 684
-rw-r--r-- meta/classes/buildstats.bbclass | 289
-rw-r--r-- meta/classes/ccache.bbclass | 8
-rw-r--r-- meta/classes/chrpath.bbclass | 115
-rw-r--r-- meta/classes/clutter.bbclass | 22
-rw-r--r-- meta/classes/cmake.bbclass | 115
-rw-r--r-- meta/classes/cml1.bbclass | 73
-rw-r--r-- meta/classes/copyleft_compliance.bbclass | 64
-rw-r--r-- meta/classes/copyleft_filter.bbclass | 62
-rw-r--r-- meta/classes/core-image.bbclass | 79
-rw-r--r-- meta/classes/cpan-base.bbclass | 55
-rw-r--r-- meta/classes/cpan.bbclass | 55
-rw-r--r-- meta/classes/cpan_build.bbclass | 50
-rw-r--r-- meta/classes/cross-canadian.bbclass | 102
-rw-r--r-- meta/classes/cross.bbclass | 80
-rw-r--r-- meta/classes/crosssdk.bbclass | 35
-rw-r--r-- meta/classes/debian.bbclass | 125
-rw-r--r-- meta/classes/deploy.bbclass | 10
-rw-r--r-- meta/classes/devshell.bbclass | 33
-rw-r--r-- meta/classes/distro_features_check.bbclass | 28
-rw-r--r-- meta/classes/distrodata.bbclass | 916
-rw-r--r-- meta/classes/distutils-base.bbclass | 4
-rw-r--r-- meta/classes/distutils-common-base.bbclass | 24
-rw-r--r-- meta/classes/distutils-native-base.bbclass | 3
-rw-r--r-- meta/classes/distutils-tools.bbclass | 77
-rw-r--r-- meta/classes/distutils.bbclass | 81
-rw-r--r-- meta/classes/distutils3-base.bbclass | 8
-rw-r--r-- meta/classes/distutils3-native-base.bbclass | 4
-rw-r--r-- meta/classes/distutils3.bbclass | 98
-rw-r--r-- meta/classes/externalsrc.bbclass | 53
-rw-r--r-- meta/classes/extrausers.bbclass | 65
-rw-r--r-- meta/classes/fontcache.bbclass | 40
-rw-r--r-- meta/classes/gconf.bbclass | 70
-rw-r--r-- meta/classes/gettext.bbclass | 19
-rw-r--r-- meta/classes/gnome.bbclass | 5
-rw-r--r-- meta/classes/gnomebase.bbclass | 30
-rw-r--r-- meta/classes/grub-efi.bbclass | 141
-rw-r--r-- meta/classes/gsettings.bbclass | 37
-rw-r--r-- meta/classes/gtk-doc.bbclass | 23
-rw-r--r-- meta/classes/gtk-icon-cache.bbclass | 62
-rw-r--r-- meta/classes/gtk-immodules-cache.bbclass | 83
-rw-r--r-- meta/classes/gummiboot.bbclass | 114
-rw-r--r-- meta/classes/gzipnative.bbclass | 5
-rw-r--r-- meta/classes/icecc.bbclass | 325
-rw-r--r-- meta/classes/image-live.bbclass | 18
-rw-r--r-- meta/classes/image-mklibs.bbclass | 71
-rw-r--r-- meta/classes/image-prelink.bbclass | 33
-rw-r--r-- meta/classes/image-swab.bbclass | 94
-rw-r--r-- meta/classes/image-vmdk.bbclass | 35
-rw-r--r-- meta/classes/image.bbclass | 408
-rw-r--r-- meta/classes/image_types.bbclass | 154
-rw-r--r-- meta/classes/image_types_uboot.bbclass | 23
-rw-r--r-- meta/classes/insane.bbclass | 1005
-rw-r--r-- meta/classes/insserv.bbclass | 5
-rw-r--r-- meta/classes/kernel-arch.bbclass | 60
-rw-r--r-- meta/classes/kernel-grub.bbclass | 91
-rw-r--r-- meta/classes/kernel-module-split.bbclass | 187
-rw-r--r-- meta/classes/kernel-yocto.bbclass | 416
-rw-r--r-- meta/classes/kernel.bbclass | 502
-rw-r--r-- meta/classes/lib_package.bbclass | 7
-rw-r--r-- meta/classes/libc-common.bbclass | 36
-rw-r--r-- meta/classes/libc-package.bbclass | 390
-rw-r--r-- meta/classes/license.bbclass | 373
-rw-r--r-- meta/classes/linux-kernel-base.bbclass | 32
-rw-r--r-- meta/classes/logging.bbclass | 72
-rw-r--r-- meta/classes/meta.bbclass | 4
-rw-r--r-- meta/classes/metadata_scm.bbclass | 82
-rw-r--r-- meta/classes/migrate_localcount.bbclass | 46
-rw-r--r-- meta/classes/mime.bbclass | 56
-rw-r--r-- meta/classes/mirrors.bbclass | 78
-rw-r--r-- meta/classes/module-base.bbclass | 18
-rw-r--r-- meta/classes/module.bbclass | 32
-rw-r--r-- meta/classes/multilib.bbclass | 141
-rw-r--r-- meta/classes/multilib_global.bbclass | 47
-rw-r--r-- meta/classes/multilib_header.bbclass | 47
-rw-r--r-- meta/classes/native.bbclass | 164
-rw-r--r-- meta/classes/nativesdk.bbclass | 94
-rw-r--r-- meta/classes/oelint.bbclass | 174
-rw-r--r-- meta/classes/own-mirrors.bbclass | 12
-rw-r--r-- meta/classes/package.bbclass | 2019
-rw-r--r-- meta/classes/package_deb.bbclass | 317
-rw-r--r-- meta/classes/package_ipk.bbclass | 261
-rw-r--r-- meta/classes/package_rpm.bbclass | 731
-rw-r--r-- meta/classes/package_tar.bbclass | 69
-rw-r--r-- meta/classes/packagedata.bbclass | 26
-rw-r--r-- meta/classes/packagegroup.bbclass | 47
-rw-r--r-- meta/classes/packageinfo.bbclass | 22
-rw-r--r-- meta/classes/patch.bbclass | 187
-rw-r--r-- meta/classes/perlnative.bbclass | 3
-rw-r--r-- meta/classes/pixbufcache.bbclass | 70
-rw-r--r-- meta/classes/pkgconfig.bbclass | 2
-rw-r--r-- meta/classes/populate_sdk.bbclass | 7
-rw-r--r-- meta/classes/populate_sdk_base.bbclass | 337
-rw-r--r-- meta/classes/populate_sdk_deb.bbclass | 13
-rw-r--r-- meta/classes/populate_sdk_ipk.bbclass | 3
-rw-r--r-- meta/classes/populate_sdk_rpm.bbclass | 16
-rw-r--r-- meta/classes/prexport.bbclass | 58
-rw-r--r-- meta/classes/primport.bbclass | 21
-rw-r--r-- meta/classes/prserv.bbclass | 33
-rw-r--r-- meta/classes/ptest.bbclass | 62
-rw-r--r-- meta/classes/python-dir.bbclass | 5
-rw-r--r-- meta/classes/python3native.bbclass | 7
-rw-r--r-- meta/classes/pythonnative.bbclass | 6
-rw-r--r-- meta/classes/qemu.bbclass | 35
-rw-r--r-- meta/classes/qmake2.bbclass | 27
-rw-r--r-- meta/classes/qmake_base.bbclass | 119
-rw-r--r-- meta/classes/qt4e.bbclass | 24
-rw-r--r-- meta/classes/qt4x11.bbclass | 14
-rw-r--r-- meta/classes/recipe_sanity.bbclass | 168
-rw-r--r-- meta/classes/relocatable.bbclass | 7
-rw-r--r-- meta/classes/report-error.bbclass | 66
-rw-r--r-- meta/classes/rm_work.bbclass | 99
-rw-r--r-- meta/classes/rootfs_deb.bbclass | 24
-rw-r--r-- meta/classes/rootfs_ipk.bbclass | 38
-rw-r--r-- meta/classes/rootfs_rpm.bbclass | 42
-rw-r--r-- meta/classes/sanity.bbclass | 800
-rw-r--r-- meta/classes/scons.bbclass | 15
-rw-r--r-- meta/classes/sdl.bbclass | 6
-rw-r--r-- meta/classes/setuptools.bbclass | 8
-rw-r--r-- meta/classes/setuptools3.bbclass | 8
-rw-r--r-- meta/classes/sip.bbclass | 63
-rw-r--r-- meta/classes/siteconfig.bbclass | 33
-rw-r--r-- meta/classes/siteinfo.bbclass | 151
-rw-r--r-- meta/classes/spdx.bbclass | 321
-rw-r--r-- meta/classes/sstate.bbclass | 798
-rw-r--r-- meta/classes/staging.bbclass | 121
-rw-r--r-- meta/classes/syslinux.bbclass | 187
-rw-r--r-- meta/classes/systemd.bbclass | 198
-rw-r--r-- meta/classes/terminal.bbclass | 94
-rw-r--r-- meta/classes/testimage-auto.bbclass | 23
-rw-r--r-- meta/classes/testimage.bbclass | 232
-rw-r--r-- meta/classes/tinderclient.bbclass | 368
-rw-r--r-- meta/classes/toaster.bbclass | 331
-rw-r--r-- meta/classes/toolchain-scripts.bbclass | 116
-rw-r--r-- meta/classes/typecheck.bbclass | 12
-rw-r--r-- meta/classes/uboot-config.bbclass | 61
-rw-r--r-- meta/classes/update-alternatives.bbclass | 267
-rw-r--r-- meta/classes/update-rc.d.bbclass | 130
-rw-r--r-- meta/classes/useradd-staticids.bbclass | 272
-rw-r--r-- meta/classes/useradd.bbclass | 211
-rw-r--r-- meta/classes/useradd_base.bbclass | 230
-rw-r--r-- meta/classes/utility-tasks.bbclass | 69
-rw-r--r-- meta/classes/utils.bbclass | 368
-rw-r--r-- meta/classes/vala.bbclass | 21
-rw-r--r-- meta/classes/waf.bbclass | 13
157 files changed, 21986 insertions, 0 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
new file mode 100644
index 0000000000..d41dd4bee8
--- /dev/null
+++ b/meta/classes/allarch.bbclass
@@ -0,0 +1,36 @@
#
# This class is used for architecture independent recipes/data files (usually scripts)
#
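#
# A hedged usage sketch (the recipe contents are illustrative, not from this
# tree): a recipe that ships only scripts or data files can pick up the
# settings below with
#
#   inherit allarch
#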

# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
# point elsewhere after these changes.
STAGING_DIR_HOST := "${STAGING_DIR_HOST}"

PACKAGE_ARCH = "all"

python () {
    # Allow this class to be included but overridden - only set
    # the values if we're still "all" package arch.
    if d.getVar("PACKAGE_ARCH") == "all":
        # No need for virtual/libc or a cross compiler
        d.setVar("INHIBIT_DEFAULT_DEPS", "1")

        # Set these to a common set of values, we shouldn't be using them
        # other than for WORKDIR directory naming anyway
        d.setVar("TARGET_ARCH", "allarch")
        d.setVar("TARGET_OS", "linux")
        d.setVar("TARGET_CC_ARCH", "none")
        d.setVar("TARGET_LD_ARCH", "none")
        d.setVar("TARGET_AS_ARCH", "none")
        d.setVar("TARGET_FPU", "")
        d.setVar("TARGET_PREFIX", "")
        d.setVar("PACKAGE_EXTRA_ARCHS", "")
        d.setVar("SDK_ARCH", "none")
        d.setVar("SDK_CC_ARCH", "none")

        # No need to do shared library processing or debug symbol handling
        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
        d.setVar("INHIBIT_PACKAGE_STRIP", "1")
}

diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
new file mode 100644
index 0000000000..8d8e7c42a8
--- /dev/null
+++ b/meta/classes/archiver.bbclass
@@ -0,0 +1,368 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# This bbclass is used for creating archives of:
# 1) the original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) the patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) the configured source: ARCHIVER_MODE[src] = "configured"
# 4) the patches between do_unpack and do_patch:
#    ARCHIVER_MODE[diff] = "1"
#    And you can set the paths that you'd like to exclude from the diff:
#    ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
# 5) the environment data, similar to 'bitbake -e recipe':
#    ARCHIVER_MODE[dumpdata] = "1"
# 6) the recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
# 7) whether to output the .src.rpm package:
#    ARCHIVER_MODE[srpm] = "1"
# 8) filtering by license: recipes whose license matches
#    COPYLEFT_LICENSE_INCLUDE will be included, and those matching
#    COPYLEFT_LICENSE_EXCLUDE will be excluded, e.g.
#    COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
#    COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
# 9) the recipe types that will be archived:
#    COPYLEFT_RECIPE_TYPES = 'target'
#

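# A hedged usage sketch for local.conf (the specific mode values below are
# illustrative choices, not recommendations made by this class):
#
#   INHERIT += "archiver"
#   ARCHIVER_MODE[src] = "original"
#   ARCHIVER_MODE[diff] = "1"
#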
# Don't filter the license by default
COPYLEFT_LICENSE_INCLUDE ?= ''
COPYLEFT_LICENSE_EXCLUDE ?= ''
# Create archives for all the recipe types
COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
inherit copyleft_filter

ARCHIVER_MODE[srpm] ?= "0"
ARCHIVER_MODE[src] ?= "patched"
ARCHIVER_MODE[diff] ?= "0"
ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"

DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"

do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"

# This is a convenience for the shell scripts to use.


python () {
    pn = d.getVar('PN', True)

    if d.getVar('COPYLEFT_LICENSE_INCLUDE', True) or \
       d.getVar('COPYLEFT_LICENSE_EXCLUDE', True):
        included, reason = copyleft_should_include(d)
        if not included:
            bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
            return
        else:
            bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))

    ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
    ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
    ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)

    if ar_src == "original":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
    elif ar_src == "patched":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
    elif ar_src == "configured":
        # We can't use "addtask do_ar_configured after do_configure" since it
        # will cause the deptask of do_populate_sysroot to run no matter what
        # archives we need, so we add the depends here.
        d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
    elif ar_src:
        bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)

    if ar_dumpdata == "1":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn)

    if ar_recipe == "1":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)

    # Output the srpm package
    ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True)
    if ar_srpm == "1":
        if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm':
            d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
            if ar_dumpdata == "1":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
            if ar_recipe == "1":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn)
            if ar_src == "original":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn)
            elif ar_src == "patched":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
            elif ar_src == "configured":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)

    # The gcc stuff uses shared source
    flag = d.getVarFlag("do_unpack", "stamp-base", True)
    if flag:
        if ar_src in [ 'original', 'patched' ]:
            ar_outdir = os.path.join(d.getVar('ARCHIVER_TOPDIR', True), 'work-shared')
            d.setVar('ARCHIVER_OUTDIR', ar_outdir)
        d.setVarFlag('do_ar_original', 'stamp-base', flag)
        d.setVarFlag('do_ar_patched', 'stamp-base', flag)
        d.setVarFlag('do_unpack_and_patch', 'stamp-base', flag)
        d.setVarFlag('do_ar_original', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
        d.setVarFlag('do_unpack_and_patch', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
        d.setVarFlag('do_ar_patched', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
        d.setVarFlag('create_diff_gz', 'vardepsexclude', 'PF')
        d.setVarFlag('create_tarball', 'vardepsexclude', 'PF')

        flag_clean = d.getVarFlag('do_unpack', 'stamp-base-clean', True)
        if flag_clean:
            d.setVarFlag('do_ar_original', 'stamp-base-clean', flag_clean)
            d.setVarFlag('do_ar_patched', 'stamp-base-clean', flag_clean)
            d.setVarFlag('do_unpack_and_patch', 'stamp-base-clean', flag_clean)
}

# Takes all the sources for a recipe and puts them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {

    import shutil, tarfile, tempfile

    if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original":
        return

    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
    bb.note('Archiving the original source...')
    fetch = bb.fetch2.Fetch([], d)
    for url in fetch.urls:
        local = fetch.localpath(url)
        if os.path.isfile(local):
            shutil.copy(local, ar_outdir)
        elif os.path.isdir(local):
            basename = os.path.basename(local)

            tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
            fetch.unpack(tmpdir, (url,))

            os.chdir(tmpdir)
            tarname = os.path.join(ar_outdir, basename + '.tar.gz')
            tar = tarfile.open(tarname, 'w:gz')
            tar.add('.')
            tar.close()

    # Emit patch series files for 'original'
    bb.note('Writing patch series files...')
    for patch in src_patches(d):
        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
        patchdir = parm.get('patchdir')
        if patchdir:
            series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_'))
        else:
            series = os.path.join(ar_outdir, 'series')

        with open(series, 'a') as s:
            s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
}

python do_ar_patched() {

    if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched':
        return

    # Get the ARCHIVER_OUTDIR before we reset the WORKDIR
    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
    bb.note('Archiving the patched source...')
    d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
    # The gcc stuff uses shared source
    flag = d.getVarFlag('do_unpack', 'stamp-base', True)
    if flag:
        create_tarball(d, d.getVar('S', True), 'patched', ar_outdir, 'gcc')
    else:
        create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
}

python do_ar_configured() {
    import shutil

    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
    if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
        bb.note('Archiving the configured source...')
        # libtool-native's do_configure removes
        # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
        # do_configure; instead we archive the already-configured ${S}.
        if d.getVar('PN', True) != 'libtool-native':
            # Change the WORKDIR to make do_configure run in another dir.
            d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
            if bb.data.inherits_class('kernel-yocto', d):
                bb.build.exec_func('do_kernel_configme', d)
            if bb.data.inherits_class('cmake', d):
                bb.build.exec_func('do_generate_toolchain_file', d)
            prefuncs = d.getVarFlag('do_configure', 'prefuncs', True)
            for func in (prefuncs or '').split():
                if func != "sysroot_cleansstate":
                    bb.build.exec_func(func, d)
            bb.build.exec_func('do_configure', d)
            postfuncs = d.getVarFlag('do_configure', 'postfuncs', True)
            for func in (postfuncs or '').split():
                if func != "do_qa_configure":
                    bb.build.exec_func(func, d)
        srcdir = d.getVar('S', True)
        builddir = d.getVar('B', True)
        if srcdir != builddir:
            if os.path.exists(builddir):
                oe.path.copytree(builddir, os.path.join(srcdir, \
                    'build.%s.ar_configured' % d.getVar('PF', True)))
        create_tarball(d, srcdir, 'configured', ar_outdir)
}

def create_tarball(d, srcdir, suffix, ar_outdir, pf=None):
    """
    Create the tarball from srcdir.
    """
    import tarfile

    bb.utils.mkdirhier(ar_outdir)
    if pf:
        tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % (pf, suffix))
    else:
        tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \
            (d.getVar('PF', True), suffix))

    srcdir = srcdir.rstrip('/')
    dirname = os.path.dirname(srcdir)
    basename = os.path.basename(srcdir)
    os.chdir(dirname)
    bb.note('Creating %s' % tarname)
    tar = tarfile.open(tarname, 'w:gz')
    tar.add(basename)
    tar.close()

# Create a .diff.gz between source.orig and source
def create_diff_gz(d, src_orig, src, ar_outdir):

    import subprocess

    if not os.path.isdir(src) or not os.path.isdir(src_orig):
        return

    # diff's --exclude can't exclude files by path, so we copy the
    # patched source and remove the files that we'd like to exclude.
    src_patched = src + '.patched'
    oe.path.copyhardlinktree(src, src_patched)
    for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split():
        bb.utils.remove(os.path.join(src_orig, i), recurse=True)
        bb.utils.remove(os.path.join(src_patched, i), recurse=True)

    dirname = os.path.dirname(src)
    basename = os.path.basename(src)
    os.chdir(dirname)
    out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True))
    diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
    subprocess.call(diff_cmd, shell=True)
    bb.utils.remove(src_patched, recurse=True)

# Run do_unpack and do_patch
python do_unpack_and_patch() {
    if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \
            [ 'patched', 'configured'] and \
            d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
        return

    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)

    # Change the WORKDIR to make do_unpack/do_patch run in another dir.
    d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))

    # The kernel source is ready after do_validate_branches
    if bb.data.inherits_class('kernel-yocto', d):
        bb.build.exec_func('do_unpack', d)
        bb.build.exec_func('do_kernel_checkout', d)
        bb.build.exec_func('do_validate_branches', d)
    else:
        bb.build.exec_func('do_unpack', d)

    # Save the original source for creating the patches
    if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
        src = d.getVar('S', True).rstrip('/')
        src_orig = '%s.orig' % src
        oe.path.copytree(src, src_orig)
    bb.build.exec_func('do_patch', d)
    # Create the patches
    if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
        bb.note('Creating diff gz...')
        create_diff_gz(d, src_orig, src, ar_outdir)
        bb.utils.remove(src_orig, recurse=True)
}

python do_ar_recipe () {
    """
    Archive the recipe, including .bb and .inc files.
    """
    import re
    import shutil

    require_re = re.compile( r"require\s+(.+)" )
    include_re = re.compile( r"include\s+(.+)" )
    bbfile = d.getVar('FILE', True)
    outdir = os.path.join(d.getVar('WORKDIR', True), \
        '%s-recipe' % d.getVar('PF', True))
    bb.utils.mkdirhier(outdir)
    shutil.copy(bbfile, outdir)

    dirname = os.path.dirname(bbfile)
    bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
    with open(bbfile, 'r') as f:
        for line in f.readlines():
            incfile = None
            if require_re.match(line):
                incfile = require_re.match(line).group(1)
            elif include_re.match(line):
                incfile = include_re.match(line).group(1)
            if incfile:
                incfile = bb.data.expand(incfile, d)
                incfile = bb.utils.which(bbpath, incfile)
                if incfile:
                    shutil.copy(incfile, outdir)

    create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True))
    bb.utils.remove(outdir, recurse=True)
}

python do_dumpdata () {
    """
    Dump environment data to ${PF}-showdata.dump
    """

    dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
        '%s-showdata.dump' % d.getVar('PF', True))
    bb.note('Dumping metadata into %s' % dumpfile)
    f = open(dumpfile, 'w')
    # emit variables and shell functions
    bb.data.emit_env(f, d, True)
    # emit the metadata which isn't valid shell
    for e in d.keys():
        if bb.data.getVarFlag(e, 'python', d):
            f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, True)))
    f.close()
}

SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
    echo "Deploying source archive files ..."
}
python do_deploy_archives_setscene () {
    sstate_setscene(d)
}
do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"

addtask do_ar_original after do_unpack
addtask do_unpack_and_patch after do_patch
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives before do_build
diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass
new file mode 100644
index 0000000000..71cf97a391
--- /dev/null
+++ b/meta/classes/autotools-brokensep.bbclass
@@ -0,0 +1,5 @@
# Autotools class for recipes where a separate build dir doesn't work.
# Ideally we should fix the software so it does work; standard autotools
# supports a separate build dir.
inherit autotools
B = "${S}"
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
new file mode 100644
index 0000000000..c60ba10929
--- /dev/null
+++ b/meta/classes/autotools.bbclass
@@ -0,0 +1,271 @@
def autotools_dep_prepend(d):
    if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
        return ''

    pn = d.getVar('PN', True)
    deps = ''

    if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
        return deps
    deps += 'autoconf-native automake-native '

    if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
        deps += 'libtool-native '
        if not bb.data.inherits_class('native', d) \
                and not bb.data.inherits_class('nativesdk', d) \
                and not bb.data.inherits_class('cross', d) \
                and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
            deps += 'libtool-cross '

    return deps + 'gnu-config-native '

EXTRA_OEMAKE = ""

DEPENDS_prepend = "${@autotools_dep_prepend(d)}"

inherit siteinfo

# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
export CONFIG_SITE = "${@siteinfo_get_files(d)}"
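# A hedged illustration of what such a site file can contain (these cache
# variables are examples only, not values shipped by this class):
#   ac_cv_func_malloc_0_nonnull=${ac_cv_func_malloc_0_nonnull=yes}
#   ac_cv_file__dev_zero=${ac_cv_file__dev_zero=yes}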

acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
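# Recipes can override acpaths to point autoreconf at their own macro
# directories; a hedged, illustrative example (the path is hypothetical):
#   acpaths = "-I ${S}/m4"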

export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"

# When building tools for use at build-time it's recommended for the build
# system to use these variables when cross-compiling.
# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
export CPP_FOR_BUILD = "${BUILD_CPP}"
export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"

export CC_FOR_BUILD = "${BUILD_CC}"
export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"

export CXX_FOR_BUILD = "${BUILD_CXX}"
export CXXFLAGS_FOR_BUILD = "${BUILD_CXXFLAGS}"

export LD_FOR_BUILD = "${BUILD_LD}"
export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"

def autotools_set_crosscompiling(d):
    if not bb.data.inherits_class('native', d):
        return " cross_compiling=yes"
    return ""

def append_libtool_sysroot(d):
    # Only supply libtool sysroot option for non-native packages
    if not bb.data.inherits_class('native', d):
        return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
    return ""

# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"

CONFIGUREOPTS = " --build=${BUILD_SYS} \
    --host=${HOST_SYS} \
    --target=${TARGET_SYS} \
    --prefix=${prefix} \
    --exec_prefix=${exec_prefix} \
    --bindir=${bindir} \
    --sbindir=${sbindir} \
    --libexecdir=${libexecdir} \
    --datadir=${datadir} \
    --sysconfdir=${sysconfdir} \
    --sharedstatedir=${sharedstatedir} \
    --localstatedir=${localstatedir} \
    --libdir=${libdir} \
    --includedir=${includedir} \
    --oldincludedir=${oldincludedir} \
    --infodir=${infodir} \
    --mandir=${mandir} \
    --disable-silent-rules \
    ${CONFIGUREOPT_DEPTRACK} \
    ${@append_libtool_sysroot(d)}"
CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking"


oe_runconf () {
    cfgscript="${S}/configure"
    if [ -x "$cfgscript" ] ; then
        bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
        set +e
        ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
        if [ "$?" != "0" ]; then
            echo "Configure failed. The contents of all config.log files follow to aid debugging"
            find ${S} -name config.log -print -exec cat {} \;
            bbfatal "oe_runconf failed"
        fi
        set -e
    else
        bbfatal "no configure script found at $cfgscript"
    fi
}

AUTOTOOLS_AUXDIR ?= "${S}"

CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"

autotools_preconfigure() {
    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
            if [ "${S}" != "${B}" ]; then
                echo "Previously configured separate build directory detected, cleaning ${B}"
                rm -rf ${B}
                mkdir ${B}
            else
                # At least remove the .la files since automake won't automatically
                # regenerate them even if CFLAGS/LDFLAGS are different
                cd ${S}; find ${S} -name \*.la -delete
            fi
        fi
    fi
}

autotools_postconfigure(){
    if [ -n "${CONFIGURESTAMPFILE}" ]; then
        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
    fi
}

EXTRACONFFUNCS ??= ""

do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
do_configure[postfuncs] += "autotools_postconfigure"

ACLOCALDIR = "${B}/aclocal-copy"

python autotools_copy_aclocals () {
    s = d.getVar("S", True)
    if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
        if not d.getVar("AUTOTOOLS_COPYACLOCAL"):
            return

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    pn = d.getVar("PN", True)
    aclocaldir = d.getVar("ACLOCALDIR", True)
    oe.path.remove(aclocaldir)
    bb.utils.mkdirhier(aclocaldir)
    configuredeps = []
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == "do_configure" and data[0] != pn:
            configuredeps.append(data[0])

    cp = []
    for c in configuredeps:
        if c.endswith("-native"):
            manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
        elif c.startswith("nativesdk-"):
            manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}-%s.populate_sysroot" % c)
        elif c.endswith("-cross") or c.endswith("-cross-initial") or c.endswith("-crosssdk") or c.endswith("-crosssdk-initial"):
            continue
        else:
            manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
        try:
            f = open(manifest, "r")
            for l in f:
                if "/aclocal/" in l and l.strip().endswith(".m4"):
                    cp.append(l.strip())
        except IOError:
            bb.warn("%s not found" % manifest)

    for c in cp:
        t = os.path.join(aclocaldir, os.path.basename(c))
        if not os.path.exists(t):
            os.symlink(c, t)
}
autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH BB_TASKDEPDATA"

autotools_do_configure() {
    # WARNING: gross hack follows:
    # An autotools-built package generally needs these scripts, however only
    # automake or libtoolize actually install the current versions of them.
    # This is a problem in builds that do not use libtool or automake, in the case
    # where we -need- the latest version of these scripts. e.g. running a build
    # for a package whose autotools are old, on an x86_64 machine, which the old
    # config.sub does not support. Work around this by installing them manually
    # regardless.
    ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
        rm -f `dirname $ac`/configure
    done )
    if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
        olddir=`pwd`
        cd ${S}
        ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
        if [ x"${acpaths}" = xdefault ]; then
            acpaths=
            for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
                grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
                acpaths="$acpaths -I $i"
            done
        else
            acpaths="${acpaths}"
        fi
        AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
        automake --version
        echo "AUTOV is $AUTOV"
        if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
            ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
        fi
        # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
        # like it was auto-generated. Work around this by blowing it away
        # by hand, unless the package specifically asked not to run aclocal.
        if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
            rm -f aclocal.m4
        fi
        if [ -e configure.in ]; then
            CONFIGURE_AC=configure.in
        else
            CONFIGURE_AC=configure.ac
        fi
        if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
            if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
                : do nothing -- we still have an old unmodified configure.ac
            else
                bbnote Executing glib-gettextize --force --copy
                echo "no" | glib-gettextize --force --copy
            fi
        else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
            # We'd call gettextize here if it wasn't so broken...
            cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
            if [ -d ${S}/po/ ]; then
                cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
                if [ ! -e ${S}/po/remove-potcdate.sin ]; then
                    cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
                fi
            fi
            for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
                for j in `find ${S} -name $i | grep -v aclocal-copy`; do
                    rm $j
                done
            done
        fi
        fi
        mkdir -p m4
        if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
            bbnote Executing intltoolize --copy --force --automake
            intltoolize --copy --force --automake
        fi
        bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
        ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed."
        cd $olddir
    fi
    if [ -e ${S}/configure ]; then
        oe_runconf
    else
        bbnote "nothing to configure"
    fi
}

autotools_do_install() {
    oe_runmake 'DESTDIR=${D}' install
    # Info dir listing isn't interesting at this point so remove it if it exists.
    if [ -e "${D}${infodir}/dir" ]; then
        rm -f ${D}${infodir}/dir
    fi
}

inherit siteconfig

EXPORT_FUNCTIONS do_configure do_install
diff --git a/meta/classes/autotools_stage.bbclass b/meta/classes/autotools_stage.bbclass
new file mode 100644
index 0000000000..b3c41e4b4d
--- /dev/null
+++ b/meta/classes/autotools_stage.bbclass
@@ -0,0 +1,2 @@
inherit autotools

diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
new file mode 100644
index 0000000000..f4f5321ac8
--- /dev/null
+++ b/meta/classes/base.bbclass
@@ -0,0 +1,661 @@
BB_DEFAULT_TASK ?= "build"
CLASSOVERRIDE ?= "class-target"

inherit patch
inherit staging

inherit mirrors
inherit utils
inherit utility-tasks
inherit metadata_scm
inherit logging

OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
OE_IMPORTS[type] = "list"

def oe_import(d):
    import sys

    bbpath = d.getVar("BBPATH", True).split(":")
    sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]

    def inject(name, value):
        """Make a python object accessible from the metadata"""
        if hasattr(bb.utils, "_context"):
            bb.utils._context[name] = value
        else:
            __builtins__[name] = value

    import oe.data
    for toimport in oe.data.typed_value("OE_IMPORTS", d):
        imported = __import__(toimport)
        inject(toimport.split(".", 1)[0], imported)

    return ""

# We need the oe module name space early (before INHERITs get added)
OE_IMPORTED := "${@oe_import(d)}"

def lsb_distro_identifier(d):
    adjust = d.getVar('LSB_DISTRO_ADJUST', True)
    adjust_func = None
    if adjust:
        try:
            adjust_func = globals()[adjust]
        except KeyError:
            pass
    return oe.lsb.distro_identifier(adjust_func)

die() {
    bbfatal "$*"
}

oe_runmake_call() {
    bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
    ${MAKE} ${EXTRA_OEMAKE} "$@"
}

oe_runmake() {
    oe_runmake_call "$@" || die "oe_runmake failed"
}


def base_dep_prepend(d):
    #
    # Ideally this will check a flag so we will operate properly in
    # the case where host == build == target, for now we don't work in
    # that case though.
    #

    deps = ""
    # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
    # we need that built is the responsibility of the patch function / class, not
    # the application.
    if not d.getVar('INHIBIT_DEFAULT_DEPS'):
        if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
            deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
    return deps

BASEDEPENDS = "${@base_dep_prepend(d)}"

DEPENDS_prepend="${BASEDEPENDS} "

FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location it's used (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"

def extra_path_elements(d):
    path = ""
    elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
    for e in elements:
        path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
    return path

PATH_prepend = "${@extra_path_elements(d)}"
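# A hedged, illustrative example of using EXTRANATIVEPATH (the entry named
# here is hypothetical for any given recipe):
#   EXTRANATIVEPATH += "chrpath-native"
# would prepend ${STAGING_BINDIR_NATIVE}/chrpath-native to PATH.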

addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
python base_do_fetch() {

    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.download()
    except bb.fetch2.BBFetchException as e:
        raise bb.build.FuncFailed(e)
}

addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
do_unpack[cleandirs] = "${S}/patches"
python base_do_unpack() {
    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    rootdir = d.getVar('WORKDIR', True)

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.unpack(rootdir)
    except bb.fetch2.BBFetchException as e:
        raise bb.build.FuncFailed(e)
}

def pkgarch_mapping(d):
    # Compatibility mappings of TUNE_PKGARCH (opt in)
    if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
        if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
            d.setVar("TUNE_PKGARCH", "armv7a")

def preferred_ml_updates(d):
    # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
    # we need to mirror these variables in the multilib case;
    multilibs = d.getVar('MULTILIBS', True) or ""
    if not multilibs:
        return

    prefixes = []
    for ext in multilibs.split():
        eext = ext.split(':')
        if len(eext) > 1 and eext[0] == 'multilib':
            prefixes.append(eext[1])

    versions = []
    providers = []
    for v in d.keys():
        if v.startswith("PREFERRED_VERSION_"):
            versions.append(v)
        if v.startswith("PREFERRED_PROVIDER_"):
            providers.append(v)

    for v in versions:
        val = d.getVar(v, False)
        pkg = v.replace("PREFERRED_VERSION_", "")
        if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
            continue
        if 'cross-canadian' in pkg:
            for p in prefixes:
                localdata = bb.data.createCopy(d)
                override = ":virtclass-multilib-" + p
                localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
                bb.data.update_data(localdata)
                newname = localdata.expand(v)
                if newname != v:
                    newval = localdata.expand(val)
                    d.setVar(newname, newval)
            # Avoid future variable key expansion
            vexp = d.expand(v)
            if v != vexp and d.getVar(v, False):
                d.renameVar(v, vexp)
            continue
        for p in prefixes:
            newname = "PREFERRED_VERSION_" + p + "-" + pkg
            if not d.getVar(newname, False):
                d.setVar(newname, val)

    for prov in providers:
        val = d.getVar(prov, False)
        pkg = prov.replace("PREFERRED_PROVIDER_", "")
        if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
            continue
        if 'cross-canadian' in pkg:
            for p in prefixes:
                localdata = bb.data.createCopy(d)
                override = ":virtclass-multilib-" + p
                localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
                bb.data.update_data(localdata)
                newname = localdata.expand(prov)
                if newname != prov:
                    newval = localdata.expand(val)
                    d.setVar(newname, newval)
            # Avoid future variable key expansion
            provexp = d.expand(prov)
            if prov != provexp and d.getVar(prov, False):
                d.renameVar(prov, provexp)
            continue
        virt = ""
        if pkg.startswith("virtual/"):
            pkg = pkg.replace("virtual/", "")
            virt = "virtual/"
        for p in prefixes:
            if pkg != "kernel":
                newval = p + "-" + val

                # implement variable keys
                localdata = bb.data.createCopy(d)
                override = ":virtclass-multilib-" + p
                localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
                bb.data.update_data(localdata)
                newname = localdata.expand(prov)
                if newname != prov and not d.getVar(newname, False):
                    d.setVar(newname, localdata.expand(newval))

                # implement alternative multilib name
                newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
                if not d.getVar(newname, False):
                    d.setVar(newname, newval)
        # Avoid future variable key expansion
        provexp = d.expand(prov)
        if prov != provexp and d.getVar(prov, False):
            d.renameVar(prov, provexp)


    mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
    extramp = []
    for p in mp:
        if p.endswith(("-native", "-crosssdk")) or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
            continue
        virt = ""
        if p.startswith("virtual/"):
            p = p.replace("virtual/", "")
            virt = "virtual/"
        for pref in prefixes:
            extramp.append(virt + pref + "-" + p)
    d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))


def get_layers_branch_rev(d):
    layers = (d.getVar("BBLAYERS", True) or "").split()
    layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
        base_get_metadata_git_branch(i, None).strip(), \
        base_get_metadata_git_revision(i, None)) \
            for i in layers]
    i = len(layers_branch_rev)-1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2 = layers_branch_rev[i-1][p2:]
        if s1 == s2:
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1 = layers_branch_rev[i][p1:]
    return layers_branch_rev


BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
BUILDCFG_FUNCS[type] = "list"

def buildcfg_vars(d):
    statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
    for var in statusvars:
        value = d.getVar(var, True)
        if value is not None:
            yield '%-17s = "%s"' % (var, value)

def buildcfg_neededvars(d):
    needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
    pesteruser = []
    for v in needed_vars:
        val = d.getVar(v, True)
        if not val or val == 'INVALID':
            pesteruser.append(v)

    if pesteruser:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))

addhandler base_eventhandler
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted"
python base_eventhandler() {
    if isinstance(e, bb.event.ConfigParsed):
        e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
        e.data.setVar('BB_VERSION', bb.__version__)
        pkgarch_mapping(e.data)
        preferred_ml_updates(e.data)
        oe.utils.features_backfill("DISTRO_FEATURES", e.data)
        oe.utils.features_backfill("MACHINE_FEATURES", e.data)

    if isinstance(e, bb.event.BuildStarted):
        localdata = bb.data.createCopy(e.data)
        bb.data.update_data(localdata)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = e.data.getVar('BUILDCFG_HEADER', True)
        bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
}

addtask configure after do_patch
do_configure[dirs] = "${S} ${B}"
do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
    :
}

addtask compile after do_configure
do_compile[dirs] = "${S} ${B}"
base_do_compile() {
    if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
        oe_runmake || die "make failed"
    else
        bbnote "nothing to compile"
    fi
}

addtask install after do_compile
do_install[dirs] = "${D} ${S} ${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"

base_do_install() {
    :
}

base_do_package() {
    :
}

addtask build after do_populate_sysroot
do_build = ""
do_build[func] = "1"
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
    :
}

def set_packagetriplet(d):
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS", True).split())
    tos.append(d.getVar("TARGET_OS", True))
    tvs.append(d.getVar("TARGET_VENDOR", True))

    def settriplet(d, varname, archs, tos, tvs):
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS", True) or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)
        bb.data.update_data(localdata)

        archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
        tos.append(localdata.getVar("TARGET_OS", True))
        tvs.append(localdata.getVar("TARGET_VENDOR", True))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)

python () {
    import string, re

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    # PACKAGECONFIG ??= "<default options>"
    # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
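    #
    # A hedged, illustrative example (the flag name and dependency names are
    # hypothetical, not set by this class). The four comma-separated fields
    # are: configure arg when enabled, configure arg when disabled,
    # build-time DEPENDS, runtime RDEPENDS:
    #
    # PACKAGECONFIG ??= "gnutls"
    # PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls,gnutls"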
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
        pn = d.getVar("PN", True)
        mlprefix = d.getVar("MLPREFIX", True)

        def expandFilter(appends, extension, prefix):
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or a.endswith("-cross"):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        def appendVar(varname, appends):
            if not appends:
                return
            if varname.find("DEPENDS") != -1:
                if pn.startswith("nativesdk-"):
                    appends = expandFilter(appends, "", "nativesdk-")
                if pn.endswith("-native"):
                    appends = expandFilter(appends, "-native", "")
                if mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            if flag == "defaultval":
                continue
            items = flagval.split(",")
            num = len(items)
            if num > 4:
                bb.error("Only enable,disable,depend,rdepend can be specified!")

            if flag in pkgconfig:
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                extraconf.append(items[1])
        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS_${PN}', extrardeps)
        if bb.data.inherits_class('cmake', d):
            appendVar('EXTRA_OECMAKE', extraconf)
        else:
            appendVar('EXTRA_OECONF', extraconf)

    # If PRINC is set, try to increase the PR value by the amount specified.
    # The PR server is now the preferred way to handle PR changes based on
    # the checksum of the recipe (including bbappends); PRINC is now
    # obsolete, so warn the user.
    princ = d.getVar('PRINC', True)
    if princ and princ != "0":
        bb.warn("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
        pr = d.getVar('PR', True)
        pr_prefix = re.search("\D+", pr)
        prval = re.search("\d+", pr)
        if pr_prefix is None or prval is None:
            bb.error("Unable to analyse format of PR variable: %s" % pr)
        nval = int(prval.group(0)) + int(princ)
        pr = pr_prefix.group(0) + str(nval) + pr[prval.end():]
        d.setVar('PR', pr)

    pn = d.getVar('PN', True)
    license = d.getVar('LICENSE', True)
    if license == "INVALID":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        unmatched_license_flag = check_license_flags(d)
        if unmatched_license_flag:
            bb.debug(1, "Skipping %s because it has a restricted license not"
                " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
            raise bb.parse.SkipPackage("because it has a restricted license not"
                " whitelisted in LICENSE_FLAGS_WHITELIST")

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.setVarFlag('do_unpack', 'umask', '022')
        d.setVarFlag('do_configure', 'umask', '022')
        d.setVarFlag('do_compile', 'umask', '022')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', 1)
        d.setVarFlag('do_install', 'umask', '022')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', 1)
        d.setVarFlag('do_package', 'umask', '022')
        d.setVarFlag('do_package_setscene', 'fakeroot', 1)
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', 1)
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
    source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST', True)
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS', True)
            if not re.match(need_host, this_host):
                raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        need_machine = d.getVar('COMPATIBLE_MACHINE', True)
        if need_machine:
            import re
            compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
            for m in compat_machines:
                if re.match(need_machine, m):
                    break
            else:
                raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))


        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()

        check_license = False if pn.startswith("nativesdk-") else True
        for t in ["-native", "-cross", "-cross-initial", "-cross-intermediate",
                  "-crosssdk-intermediate", "-crosssdk", "-crosssdk-initial",
                  "-cross-canadian-" + d.getVar('TRANSLATED_TARGET_ARCH', True)]:
            if pn.endswith(t):
                check_license = False

        if check_license and bad_licenses:
            whitelist = []
            for lic in bad_licenses:
                for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]:
                    whitelist.extend((d.getVar(w + lic, True) or "").split())
                spdx_license = return_spdx(d, lic)
                if spdx_license:
                    whitelist.extend((d.getVar('HOSTTOOLS_WHITELIST_%s' % spdx_license, True) or "").split())
            if not pn in whitelist:
                recipe_license = d.getVar('LICENSE', True)
                pkgs = d.getVar('PACKAGES', True).split()
                skipped_pkgs = []
                unskipped_pkgs = []
                for pkg in pkgs:
                    if incompatible_license(d, bad_licenses, pkg):
                        skipped_pkgs.append(pkg)
                    else:
                        unskipped_pkgs.append(pkg)
                all_skipped = skipped_pkgs and not unskipped_pkgs
                if unskipped_pkgs:
                    for pkg in skipped_pkgs:
                        bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license)
                        d.setVar('LICENSE_EXCLUSION-' + pkg, 1)
                    for pkg in unskipped_pkgs:
                        bb.debug(1, "INCLUDING the package " + pkg)
                elif all_skipped or incompatible_license(d, bad_licenses):
                    bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, recipe_license))
                    raise bb.parse.SkipPackage("incompatible with license %s" % recipe_license)

    srcuri = d.getVar('SRC_URI', True)
    # Svn packages should DEPEND on subversion-native
    if "svn://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

    # Git packages should DEPEND on git-native
    if "git://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

    # Mercurial packages should DEPEND on mercurial-native
    elif "hg://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')

    # OSC packages should DEPEND on osc-native
    elif "osc://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

    # *.lz4 should depend on lz4-native for unpacking
    # Not endswith because of "*.patch.lz4;patch=1". Need bb.fetch.decodeurl in future
    if '.lz4' in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

    # *.xz should depend on xz-native for unpacking
    # Not endswith because of "*.patch.xz;patch=1". Need bb.fetch.decodeurl in future
    if '.xz' in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

    # unzip-native should already be staged before unpacking ZIP recipes
    if ".zip" in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

    # file is needed by rpm2cpio.sh
    if ".src.rpm" in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH', True)
    pkg_arch = d.getVar('PACKAGE_ARCH', True)

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for urls with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH', True) or '').split(':')
        machine = d.getVar('MACHINE', True)
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if len(paths) != 0:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES', True).split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
}

addtask cleansstate after do_clean
python do_cleansstate() {
    sstate_clean_cachefiles(d)
}

addtask cleanall after do_cleansstate
python do_cleanall() {
    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.clean()
    except bb.fetch2.BBFetchException as e:
        raise bb.build.FuncFailed(e)
}
do_cleanall[nostamp] = "1"


EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
new file mode 100644
index 0000000000..a52b75be5c
--- /dev/null
+++ b/meta/classes/bin_package.bbclass
@@ -0,0 +1,36 @@
1#
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4#
5# Common variable and task for the binary package recipe.
6# Basic principle:
7# * The files have been unpacked to ${S} by base.bbclass
8# * Skip do_configure and do_compile
9# * Use do_install to install the files to ${D}
10#
11# Note:
12# The "subdir" parameter in the SRC_URI is useful when the input package
13# is rpm, ipk, deb and so on, for example:
14#
15# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
16#
17# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
18# they would be in ${WORKDIR}.
19#
20
21# Skip the unwanted steps
22do_configure[noexec] = "1"
23do_compile[noexec] = "1"
24
25# Install the files to ${D}
26bin_package_do_install () {
27 # Do it carefully
28 [ -d "${S}" ] || exit 1
29 cd ${S} || exit 1
30 tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
31 | tar --no-same-owner -xpf - -C ${D}
32}
33
34FILES_${PN} = "/"
35
36EXPORT_FUNCTIONS do_install
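A minimal sketch of a recipe using this class, following the subdir example from the header above (names and URL are hypothetical):

    SUMMARY = "Prebuilt foo"
    LICENSE = "CLOSED"
    SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
    S = "${WORKDIR}/foo-1.0"

    inherit bin_package

The class supplies do_install via EXPORT_FUNCTIONS, so the unpacked tree under ${S} is copied into ${D} as-is and packaged wholesale through FILES_${PN} = "/".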
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
new file mode 100644
index 0000000000..7158c8c705
--- /dev/null
+++ b/meta/classes/binconfig.bbclass
@@ -0,0 +1,63 @@
1FILES_${PN}-dev += "${bindir}/*-config"
2
3# The namespaces can clash here, hence the two-step replace
4def get_binconfig_mangle(d):
5 s = "-e ''"
6 if not bb.data.inherits_class('native', d):
7 optional_quote = r"\(\"\?\)"
8 s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
9 s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
10 s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
11 s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
12 s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
13 s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
14 s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
15 s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
16 s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
17 s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
18 s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
19 s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
20 s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
21 s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
22 s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
23 s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
24 if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
25 s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
26
27 return s
28
29BINCONFIG_GLOB ?= "*-config"
30
31PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
32
33binconfig_package_preprocess () {
34 for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
35 sed -i \
36 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
37 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
38 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
39 -e 's:${STAGING_DATADIR}:${datadir}:' \
40 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
41 $config
42 done
43 for lafile in `find ${PKGD} -name "*.la"` ; do
44 sed -i \
45 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
46 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
47 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
48 -e 's:${STAGING_DATADIR}:${datadir}:' \
49 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
50 $lafile
51 done
52}
53
54SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
55
56binconfig_sysroot_preprocess () {
57 for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do
58 configname=`basename $config`
59 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
60 cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
61 chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
62 done
63}
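To make the two-step replace concrete, here is how a line of a hypothetical foo-config script is transformed on its way into the sysroot (the intermediate OE* tokens exist only to stop the first set of substitutions matching the output of the second):

    # As built in ${B}:
    libdir="/usr/lib"
    # After the first set of expressions in get_binconfig_mangle():
    libdir="OELIBDIR"
    # After the second set, as staged to ${bindir_crossscripts}:
    libdir="${STAGING_LIBDIR}"

binconfig_package_preprocess performs the inverse mapping on ${PKGD}, so packaged scripts report target paths rather than staging ones.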
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
new file mode 100644
index 0000000000..a0141a82c0
--- /dev/null
+++ b/meta/classes/blacklist.bbclass
@@ -0,0 +1,45 @@
1# Anonymous support class, originally from Angstrom
2#
3# To use the blacklist, a distribution should include this
4# class in the INHERIT_DISTRO
5#
6# ANGSTROM_BLACKLIST is no longer used; instead use a table of
7# recipes in PNBLACKLIST
8#
9# Features:
10#
11# * To add a package to the blacklist, set:
12# PNBLACKLIST[pn] = "message"
13#
14
15# Cope with PNBLACKLIST flags for the multilib case
16addhandler blacklist_multilib_eventhandler
17blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
18python blacklist_multilib_eventhandler() {
19 multilibs = e.data.getVar('MULTILIBS', True)
20 if not multilibs:
21 return
22
23 # this block has been copied from base.bbclass so keep it in sync
24 prefixes = []
25 for ext in multilibs.split():
26 eext = ext.split(':')
27 if len(eext) > 1 and eext[0] == 'multilib':
28 prefixes.append(eext[1])
29
30 blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
31 for pkg, reason in blacklists.items():
32 if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
33 continue
34 for p in prefixes:
35 newpkg = p + "-" + pkg
36 if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
37 e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
38}
39
40python () {
41 blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
42
43 if blacklist:
44 raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
45}
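In use, a configuration enables the class and populates the table; parsing a listed recipe then fails with the "Recipe is blacklisted" message from the anonymous function above. The recipe name and reason below are hypothetical:

    INHERIT += "blacklist"
    PNBLACKLIST[foo] = "fails to build with the current toolchain"

With MULTILIBS set, the event handler above copies the flag to e.g. lib32-foo automatically unless a more specific entry already exists.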
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
new file mode 100644
index 0000000000..88e5c52e2b
--- /dev/null
+++ b/meta/classes/boot-directdisk.bbclass
@@ -0,0 +1,182 @@
1# boot-directdisk.bbclass
2# (loosely based on bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
3#
4# Create an image which can be placed directly onto a hard disk using dd and then
5# booted.
6#
7# This uses syslinux. extlinux would have been nice but requires the ext2/3
8# partition to be mounted. grub requires running itself as part of the install
9# process.
10#
11# The end result is a 512-byte boot sector populated with an MBR and partition
12# table, followed by an msdos fat16 partition containing syslinux and a linux
13# kernel, completed by the ext2/3 rootfs.
14#
15# We have to push the msdos partition size > 16MB so fat16 is used, as parted
16# won't touch fat12 partitions.
17
18# External variables needed
19
20# ${ROOTFS} - the rootfs image to incorporate
21
22do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
23 syslinux:do_populate_sysroot \
24 syslinux-native:do_populate_sysroot \
25 parted-native:do_populate_sysroot \
26 mtools-native:do_populate_sysroot "
27
28PACKAGES = " "
29EXCLUDE_FROM_WORLD = "1"
30
31BOOTDD_VOLUME_ID ?= "boot"
32BOOTDD_EXTRA_SPACE ?= "16384"
33
34EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
35EFI_PROVIDER ?= "grub-efi"
36EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
37
38# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
39# contain "efi". This way legacy is supported by default if neither is
40# specified, maintaining the original behavior.
41def pcbios(d):
42 pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
43 if pcbios == "0":
44 pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
45 return pcbios
46
47def pcbios_class(d):
48 if d.getVar("PCBIOS", True) == "1":
49 return "syslinux"
50 return ""
51
52PCBIOS = "${@pcbios(d)}"
53PCBIOS_CLASS = "${@pcbios_class(d)}"
54
55inherit ${PCBIOS_CLASS}
56inherit ${EFI_CLASS}
57
58# Get the build_syslinux_cfg() function from the syslinux class
59
60AUTO_SYSLINUXCFG = "1"
61DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
62SYSLINUX_ROOT ?= "root=/dev/sda2"
63SYSLINUX_TIMEOUT ?= "10"
64
65IS_VMDK = '${@base_contains("IMAGE_FSTYPES", "vmdk", "true", "false", d)}'
66
67boot_direct_populate() {
68 dest=$1
69 install -d $dest
70
71	# Install bzImage and initrd in $dest for all loaders to use.
72 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz
73
74 if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
75 install -m 0644 ${INITRD} $dest/initrd
76 fi
77
78}
79
80build_boot_dd() {
81 HDDDIR="${S}/hdd/boot"
82 HDDIMG="${S}/hdd.image"
83 IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
84
85 boot_direct_populate $HDDDIR
86
87 if [ "${PCBIOS}" = "1" ]; then
88 syslinux_hddimg_populate $HDDDIR
89 fi
90 if [ "${EFI}" = "1" ]; then
91 efi_hddimg_populate $HDDDIR
92 fi
93
94 if [ "${IS_VMDK}" = "true" ]; then
95 if [ "x${AUTO_SYSLINUXMENU}" = "x1" ] ; then
96 install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 ${HDDDIR}${SYSLINUXDIR}/vesamenu.c32
97 if [ "x${SYSLINUX_SPLASH}" != "x" ] ; then
98 install -m 0644 ${SYSLINUX_SPLASH} ${HDDDIR}${SYSLINUXDIR}/splash.lss
99 fi
100 fi
101 fi
102
103 BLOCKS=`du -bks $HDDDIR | cut -f 1`
104 BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
105
106 # Ensure total sectors is an integral number of sectors per
107 # track or mcopy will complain. Sectors are 512 bytes, and we
108 # generate images with 32 sectors per track. This calculation is
109 # done in blocks, thus the mod by 16 instead of 32.
110 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
111
112 mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
113 mcopy -i $HDDIMG -s $HDDDIR/* ::/
114
115 if [ "${PCBIOS}" = "1" ]; then
116 syslinux_hdddirect_install $HDDIMG
117 fi
118 chmod 644 $HDDIMG
119
120 ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
121 TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
122 END1=`expr $BLOCKS \* 1024`
123 END2=`expr $END1 + 512`
124 END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
125
126 echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
127 rm -rf $IMAGE
128 dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
129
130 parted $IMAGE mklabel msdos
131 parted $IMAGE mkpart primary fat16 0 ${END1}B
132 parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
133 parted $IMAGE set 1 boot on
134
135 parted $IMAGE print
136
137 awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
138 dd of=$IMAGE bs=1 seek=440 conv=notrunc
139
140 OFFSET=`expr $END2 / 512`
141 if [ "${PCBIOS}" = "1" ]; then
142 dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
143 fi
144
145 dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
146 dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
147
148 cd ${DEPLOY_DIR_IMAGE}
149 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
150 ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
151}
152
153python do_bootdirectdisk() {
154 validate_disk_signature(d)
155 if d.getVar("PCBIOS", True) == "1":
156 bb.build.exec_func('build_syslinux_cfg', d)
157 if d.getVar("EFI", True) == "1":
158 bb.build.exec_func('build_efi_cfg', d)
159 bb.build.exec_func('build_boot_dd', d)
160}
161
162def generate_disk_signature():
163 import uuid
164
165 signature = str(uuid.uuid4())[:8]
166
167 if signature != '00000000':
168 return signature
169 else:
170 return 'ffffffff'
171
172def validate_disk_signature(d):
173 import re
174
175 disk_signature = d.getVar("DISK_SIGNATURE", True)
176
177 if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
178 bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
179
180DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
181
182addtask bootdirectdisk before do_build
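A worked example of the block-count arithmetic in build_boot_dd(), with a hypothetical payload of 9000 1K blocks and the default BOOTDD_EXTRA_SPACE of 16384:

    BLOCKS = 9000 + 16384          # 25384
    25384 % 16                     # = 8
    BLOCKS = 25384 + (16 - 8)      # = 25392 blocks = 50784 sectors,
                                   #   an exact multiple of 32 per track

Note that when BLOCKS is already a multiple of 16 the expression still adds a full 16 blocks, so the image always carries at least one block of padding.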
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
new file mode 100644
index 0000000000..b13eef965d
--- /dev/null
+++ b/meta/classes/bootimg.bbclass
@@ -0,0 +1,240 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
2# Released under the MIT license (see packages/COPYING)
3
4# Creates a bootable image using syslinux, your kernel and an optional
5# initrd
6
7#
8# End result is two things:
9#
10# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
11# an initrd and a rootfs image. These can be written to harddisks directly and
12# also booted on USB flash disks (write them there with dd).
13#
14# 2. A CD .iso image
15
16# The boot process is that the initrd boots and processes whichever label was
17# selected in syslinux; actions based on the label are then performed (e.g.
18# installing to an hdd).
19
20# External variables (also used by syslinux.bbclass)
21# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
22# ${COMPRESSISO} - Transparently compress the ISO, reducing its size by ~40%, if set to 1
23# ${NOISO} - skip building the ISO image if set to 1
24# ${NOHDD} - skip building the HDD image if set to 1
25# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
26
27do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
28 mtools-native:do_populate_sysroot \
29 cdrtools-native:do_populate_sysroot \
30 ${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}"
31
32PACKAGES = " "
33EXCLUDE_FROM_WORLD = "1"
34
35HDDDIR = "${S}/hddimg"
36ISODIR = "${S}/iso"
37EFIIMGDIR = "${S}/efi_img"
38COMPACT_ISODIR = "${S}/iso.z"
39COMPRESSISO ?= "0"
40
41BOOTIMG_VOLUME_ID ?= "boot"
42BOOTIMG_EXTRA_SPACE ?= "512"
43
44EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
45EFI_PROVIDER ?= "grub-efi"
46EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
47
48# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
49# contain "efi". This way legacy is supported by default if neither is
50# specified, maintaining the original behavior.
51def pcbios(d):
52 pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
53 if pcbios == "0":
54 pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
55 return pcbios
56
57PCBIOS = "${@pcbios(d)}"
58
59# syslinux is required for the isohybrid command and the boot catalog
60inherit syslinux
61inherit ${EFI_CLASS}
62
63populate() {
64 DEST=$1
65 install -d ${DEST}
66
67 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
68 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
69
70 if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
71 install -m 0644 ${INITRD} ${DEST}/initrd
72 fi
73
74 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
75 install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
76 fi
77
78}
79
80build_iso() {
81 # Only create an ISO if we have an INITRD and NOISO was not set
82 if [ -z "${INITRD}" ] || [ ! -s "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
83 bbnote "ISO image will not be created."
84 return
85 fi
86
87 populate ${ISODIR}
88
89 if [ "${PCBIOS}" = "1" ]; then
90 syslinux_iso_populate ${ISODIR}
91 fi
92 if [ "${EFI}" = "1" ]; then
93 efi_iso_populate ${ISODIR}
94 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
95 fi
96
97 # EFI only
98 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
99		# Work around a bug in isohybrid where it requires isolinux.bin
100		# in the boot catalog, even though it is not used
101 mkdir -p ${ISODIR}/${ISOLINUXDIR}
102 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
103 fi
104
105 if [ "${COMPRESSISO}" = "1" ] ; then
106 # create compact directory, compress iso
107 mkdir -p ${COMPACT_ISODIR}
108 mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
109
110 # move compact iso to iso, then remove compact directory
111 mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
112 rm -Rf ${COMPACT_ISODIR}
113 mkisofs_compress_opts="-R -z -D -l"
114 else
115 mkisofs_compress_opts="-r"
116 fi
117
118 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
119 # PCBIOS only media
120 mkisofs -V ${BOOTIMG_VOLUME_ID} \
121 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
122 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
123 $mkisofs_compress_opts \
124 ${MKISOFS_OPTIONS} ${ISODIR}
125 else
126 # EFI only OR EFI+PCBIOS
127 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
128 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
129 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
130 $mkisofs_compress_opts ${MKISOFS_OPTIONS} \
131 -eltorito-alt-boot -eltorito-platform efi \
132 -b efi.img -no-emul-boot \
133 ${ISODIR}
134 isohybrid_args="-u"
135 fi
136
137 isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
138
139 cd ${DEPLOY_DIR_IMAGE}
140 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
141 ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
142}
143
144build_fat_img() {
145 FATSOURCEDIR=$1
146 FATIMG=$2
147
148 # Calculate the size required for the final image including the
149 # data and filesystem overhead.
150 # Sectors: 512 bytes
151 # Blocks: 1024 bytes
152
153 # Determine the sector count just for the data
154 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
155
156 # Account for the filesystem overhead. This includes directory
157 # entries in the clusters as well as the FAT itself.
158 # Assumptions:
159 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
160 # padding will be minimal on those smaller images and not
161	#    worth the logic here to calculate the smaller FAT sizes)
162 # < 16 entries per directory
163 # 8.3 filenames only
164
165 # 32 bytes per dir entry
166 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
167 # 32 bytes for every end-of-directory dir entry
168 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
169 # 4 bytes per FAT entry per sector of data
170 FAT_BYTES=$(expr $SECTORS \* 4)
171 # 4 bytes per FAT entry per end-of-cluster list
172 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
173
174 # Use a ceiling function to determine FS overhead in sectors
175 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
176 # There are two FATs on the image
177 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
178 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
179
180 # Determine the final size in blocks accounting for some padding
181 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
182
183 # Ensure total sectors is an integral number of sectors per
184 # track or mcopy will complain. Sectors are 512 bytes, and we
185 # generate images with 32 sectors per track. This calculation is
186 # done in blocks, thus the mod by 16 instead of 32.
187 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
188
189 # mkdosfs will sometimes use FAT16 when it is not appropriate,
190 # resulting in a boot failure from SYSLINUX. Use FAT32 for
191 # images larger than 512MB, otherwise let mkdosfs decide.
192 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
193 FATSIZE="-F 32"
194 fi
195
196 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} ${BLOCKS}
197 # Copy FATSOURCEDIR recursively into the image file directly
198 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
199}
200
201build_hddimg() {
202 # Create an HDD image
203 if [ "${NOHDD}" != "1" ] ; then
204 populate ${HDDDIR}
205
206 if [ "${PCBIOS}" = "1" ]; then
207 syslinux_hddimg_populate ${HDDDIR}
208 fi
209 if [ "${EFI}" = "1" ]; then
210 efi_hddimg_populate ${HDDDIR}
211 fi
212
213 build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
214
215 if [ "${PCBIOS}" = "1" ]; then
216 syslinux_hddimg_install
217 fi
218
219 chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
220
221 cd ${DEPLOY_DIR_IMAGE}
222 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
223 ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
224 fi
225}
226
227python do_bootimg() {
228 if d.getVar("PCBIOS", True) == "1":
229 bb.build.exec_func('build_syslinux_cfg', d)
230 if d.getVar("EFI", True) == "1":
231 bb.build.exec_func('build_efi_cfg', d)
232 bb.build.exec_func('build_hddimg', d)
233 bb.build.exec_func('build_iso', d)
234}
235
236IMAGE_TYPEDEP_iso = "ext3"
237IMAGE_TYPEDEP_hddimg = "ext3"
238IMAGE_TYPES_MASKED += "iso hddimg"
239
240addtask bootimg before do_build
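A worked example of the overhead estimate in build_fat_img(), assuming a hypothetical payload of 50000 1K blocks holding 14 files in 2 subdirectories:

    SECTORS     = 50000 * 2                   = 100000
    DIR_BYTES   = 16 * 32 + 2 * 32            = 576     # 16 entries + 2 end-of-dir
    FAT_BYTES   = 100000 * 4 + 2 * 4          = 400008
    DIR_SECTORS = (576 + 511) / 512           = 2       # ceiling division
    FAT_SECTORS = ((400008 + 511) / 512) * 2  = 1564    # two FAT copies
    SECTORS     = 100000 + 2 + 1564           = 101566
    BLOCKS      = 101566 / 2 + 512            = 51295
    BLOCKS      = 51295 + (16 - 51295 % 16)   = 51296   # track alignment

51296 / 1024 is about 50 (MB), well under 512, so FATSIZE stays empty and mkdosfs picks the FAT width itself.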
diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass
new file mode 100644
index 0000000000..3fc8956428
--- /dev/null
+++ b/meta/classes/bugzilla.bbclass
@@ -0,0 +1,187 @@
1#
2# Small event handler to automatically reopen and file
3# bug reports at a bugzilla of your choice.
4# It uses the XML-RPC interface, so you must have it enabled
5#
6# Before using it you must define the BUGZILLA_USER and BUGZILLA_PASS credentials,
7# BUGZILLA_XMLRPC - URI of xmlrpc.cgi,
8# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in the BTS for build bugs, and
9# BUGZILLA_VERSION - the version against which to report new bugs
10#
11
12def bugzilla_find_bug_report(debug_file, server, args, bugname):
13 args['summary'] = bugname
14 bugs = server.Bug.search(args)
15 if len(bugs['bugs']) == 0:
16 print >> debug_file, "Bugs not found"
17 return (False,None)
18 else: # silently pick the first result
19 print >> debug_file, "Result of bug search is "
20 print >> debug_file, bugs
21 status = bugs['bugs'][0]['status']
22 id = bugs['bugs'][0]['id']
23 return (not status in ["CLOSED", "RESOLVED", "VERIFIED"],id)
24
25def bugzilla_file_bug(debug_file, server, args, name, text, version):
26 args['summary'] = name
27 args['comment'] = text
28 args['version'] = version
29 args['op_sys'] = 'Linux'
30 args['platform'] = 'Other'
31 args['severity'] = 'normal'
32 args['priority'] = 'Normal'
33 try:
34 return server.Bug.create(args)['id']
35 except Exception, e:
36 print >> debug_file, repr(e)
37 return None
38
39def bugzilla_reopen_bug(debug_file, server, args, bug_number):
40 args['ids'] = [bug_number]
41 args['status'] = "CONFIRMED"
42 try:
43 server.Bug.update(args)
44 return True
45 except Exception, e:
46 print >> debug_file, repr(e)
47 return False
48
49def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
50 args['ids'] = [bug_number]
51 args['file_name'] = file_name
52 args['summary'] = logdescription
53 args['content_type'] = "text/plain"
54 args['data'] = log
55 args['comment'] = text
56 try:
57 server.Bug.add_attachment(args)
58 return True
59 except Exception, e:
60 print >> debug_file, repr(e)
61 return False
62
63def bugzilla_add_comment(debug_file, server, args, bug_number, text):
64 args['id'] = bug_number
65 args['comment'] = text
66 try:
67 server.Bug.add_comment(args)
68 return True
69 except Exception, e:
70 print >> debug_file, repr(e)
71 return False
72
73addhandler bugzilla_eventhandler
74bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed"
75python bugzilla_eventhandler() {
76 import glob
77    import xmlrpclib, httplib, base64
78
79 class ProxiedTransport(xmlrpclib.Transport):
80 def __init__(self, proxy, use_datetime = 0):
81 xmlrpclib.Transport.__init__(self, use_datetime)
82 self.proxy = proxy
83 self.user = None
84 self.password = None
85
86 def set_user(self, user):
87 self.user = user
88
89 def set_password(self, password):
90 self.password = password
91
92 def make_connection(self, host):
93 self.realhost = host
94 return httplib.HTTP(self.proxy)
95
96 def send_request(self, connection, handler, request_body):
97 connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
98 if self.user != None:
99 if self.password != None:
100 auth = "%s:%s" % (self.user, self.password)
101 else:
102 auth = self.user
103 connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
104
105 event = e
106 data = e.data
107 name = bb.event.getName(event)
108 if name == "MsgNote":
109 # avoid recursion
110 return
111
112 if name == "TaskFailed":
113 xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
114 user = data.getVar("BUGZILLA_USER", True)
115 passw = data.getVar("BUGZILLA_PASS", True)
116 product = data.getVar("BUGZILLA_PRODUCT", True)
117 compon = data.getVar("BUGZILLA_COMPONENT", True)
118 version = data.getVar("BUGZILLA_VERSION", True)
119
120 proxy = data.getVar('http_proxy', True )
121 if (proxy):
122 import urllib2
123 s, u, p, hostport = urllib2._parse_proxy(proxy)
124 transport = ProxiedTransport(hostport)
125 else:
126 transport = None
127
128 server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
129 args = {
130 'Bugzilla_login': user,
131 'Bugzilla_password': passw,
132 'product': product,
133 'component': compon}
134
135 # evil hack to figure out what is going on
136 debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
137
138 file = None
139 bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
140 "pv" : data.getVar("PV", True),
141 }
142 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
143 text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
144 if len(log_file) != 0:
145 print >> debug_file, "Adding log file %s" % log_file[0]
146 file = open(log_file[0], 'r')
147 log = file.read()
148 file.close();
149 else:
150 print >> debug_file, "No log file found for the glob"
151 log = None
152
153 (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
154 print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
155
156        # File a new bug if none exists, reopen it if it was closed, then attach the error log
157 if not bug_number:
158 bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
159 if not bug_number:
160                print >> debug_file, "Couldn't acquire a new bug_number, filing a bug report failed"
161 else:
162 print >> debug_file, "The new bug_number: '%s'" % bug_number
163 elif not bug_open:
164 if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
165 print >> debug_file, "Failed to reopen the bug #%s" % bug_number
166 else:
167 print >> debug_file, "Reopened the bug #%s" % bug_number
168
169 if bug_number and log:
170 print >> debug_file, "The bug is known as '%s'" % bug_number
171 desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
172 if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
173 print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
174 else:
175 print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
176 else:
177 print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
178            if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
179                print >> debug_file, "Failed to add a comment with the build log for bug #%s" % bug_number
180            else:
181                print >> debug_file, "Added a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
182
183 # store bug number for oestats-client
184 if bug_number:
185 data.setVar('OESTATS_BUG_NUMBER', bug_number)
186}
187
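A sketch of the configuration the handler expects, e.g. in local.conf; all values are hypothetical:

    INHERIT += "bugzilla"
    BUGZILLA_XMLRPC = "https://bugzilla.example.com/xmlrpc.cgi"
    BUGZILLA_USER = "autobuilder@example.com"
    BUGZILLA_PASS = "secret"
    BUGZILLA_PRODUCT = "MyDistro"
    BUGZILLA_COMPONENT = "Builds"
    BUGZILLA_VERSION = "1.0"

With these set, a failed task files (or reopens) a bug named ${PN}-${PV}-autobuild and attaches the task log when one is found.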
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
new file mode 100644
index 0000000000..262095f60a
--- /dev/null
+++ b/meta/classes/buildhistory.bbclass
@@ -0,0 +1,684 @@
1#
2# Records history of build output in order to detect regressions
3#
4# Based in part on testlab.bbclass and packagehistory.bbclass
5#
6# Copyright (C) 2011-2014 Intel Corporation
7# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
8#
9
10BUILDHISTORY_FEATURES ?= "image package sdk"
11BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
12BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
13BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
14BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}/${IMAGE_BASENAME}"
15BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
16BUILDHISTORY_COMMIT ?= "0"
17BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
18BUILDHISTORY_PUSH_REPO ?= ""
19
20SSTATEPOSTINSTFUNCS += "buildhistory_emit_pkghistory"
21# We want to avoid influencing the signatures of sstate tasks - first the function itself:
22sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
23# then the value added to SSTATEPOSTINSTFUNCS:
24SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
25
26#
27# Write out metadata about this package for comparison when writing future packages
28#
29python buildhistory_emit_pkghistory() {
30 if not d.getVar('BB_CURRENTTASK', True) in ['packagedata', 'packagedata_setscene']:
31 return 0
32
33 if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
34 return 0
35
36 import re
37 import json
38 import errno
39
40 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
41
42 class RecipeInfo:
43 def __init__(self, name):
44 self.name = name
45 self.pe = "0"
46 self.pv = "0"
47 self.pr = "r0"
48 self.depends = ""
49 self.packages = ""
50 self.srcrev = ""
51
52
53 class PackageInfo:
54 def __init__(self, name):
55 self.name = name
56 self.pe = "0"
57 self.pv = "0"
58 self.pr = "r0"
59 # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them
60 self.pkg = ""
61 self.pkge = ""
62 self.pkgv = ""
63 self.pkgr = ""
64 self.size = 0
65 self.depends = ""
66 self.rprovides = ""
67 self.rdepends = ""
68 self.rrecommends = ""
69 self.rsuggests = ""
70 self.rreplaces = ""
71 self.rconflicts = ""
72 self.files = ""
73 self.filelist = ""
74 # Variables that need to be written to their own separate file
75 self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'])
76
77    # Should check PACKAGES here to see if anything was removed
78
79 def readPackageInfo(pkg, histfile):
80 pkginfo = PackageInfo(pkg)
81 with open(histfile, "r") as f:
82 for line in f:
83 lns = line.split('=')
84 name = lns[0].strip()
85 value = lns[1].strip(" \t\r\n").strip('"')
86 if name == "PE":
87 pkginfo.pe = value
88 elif name == "PV":
89 pkginfo.pv = value
90 elif name == "PR":
91 pkginfo.pr = value
92 elif name == "PKG":
93 pkginfo.pkg = value
94 elif name == "PKGE":
95 pkginfo.pkge = value
96 elif name == "PKGV":
97 pkginfo.pkgv = value
98 elif name == "PKGR":
99 pkginfo.pkgr = value
100 elif name == "RPROVIDES":
101 pkginfo.rprovides = value
102 elif name == "RDEPENDS":
103 pkginfo.rdepends = value
104 elif name == "RRECOMMENDS":
105 pkginfo.rrecommends = value
106 elif name == "RSUGGESTS":
107 pkginfo.rsuggests = value
108 elif name == "RREPLACES":
109 pkginfo.rreplaces = value
110 elif name == "RCONFLICTS":
111 pkginfo.rconflicts = value
112 elif name == "PKGSIZE":
113 pkginfo.size = long(value)
114 elif name == "FILES":
115 pkginfo.files = value
116 elif name == "FILELIST":
117 pkginfo.filelist = value
118 # Apply defaults
119 if not pkginfo.pkg:
120 pkginfo.pkg = pkginfo.name
121 if not pkginfo.pkge:
122 pkginfo.pkge = pkginfo.pe
123 if not pkginfo.pkgv:
124 pkginfo.pkgv = pkginfo.pv
125 if not pkginfo.pkgr:
126 pkginfo.pkgr = pkginfo.pr
127 return pkginfo
128
129 def getlastpkgversion(pkg):
130 try:
131 histfile = os.path.join(pkghistdir, pkg, "latest")
132 return readPackageInfo(pkg, histfile)
133 except EnvironmentError:
134 return None
135
136 def sortpkglist(string):
137 pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0)
138 pkglist = [p.group(0) for p in pkgiter]
139 pkglist.sort()
140 return ' '.join(pkglist)
141
142 def sortlist(string):
143 items = string.split(' ')
144 items.sort()
145 return ' '.join(items)
146
147 pn = d.getVar('PN', True)
148 pe = d.getVar('PE', True) or "0"
149 pv = d.getVar('PV', True)
150 pr = d.getVar('PR', True)
151
152 pkgdata_dir = d.getVar('PKGDATA_DIR', True)
153 packages = ""
154 try:
155 with open(os.path.join(pkgdata_dir, pn)) as f:
156 for line in f.readlines():
157 if line.startswith('PACKAGES: '):
158 packages = squashspaces(line.split(': ', 1)[1])
159 break
160 except IOError as e:
161 if e.errno == errno.ENOENT:
162 # Probably a -cross recipe, just ignore
163 return 0
164 else:
165 raise
166
167 packagelist = packages.split()
168 if not os.path.exists(pkghistdir):
169 bb.utils.mkdirhier(pkghistdir)
170 else:
171 # Remove files for packages that no longer exist
172 for item in os.listdir(pkghistdir):
173 if item != "latest" and item != "latest_srcrev":
174 if item not in packagelist:
175 subdir = os.path.join(pkghistdir, item)
176 for subfile in os.listdir(subdir):
177 os.unlink(os.path.join(subdir, subfile))
178 os.rmdir(subdir)
179
180 rcpinfo = RecipeInfo(pn)
181 rcpinfo.pe = pe
182 rcpinfo.pv = pv
183 rcpinfo.pr = pr
184 rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
185 rcpinfo.packages = packages
186 write_recipehistory(rcpinfo, d)
187
188 pkgdest = d.getVar('PKGDEST', True)
189 for pkg in packagelist:
190 pkgdata = {}
191 with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
192 for line in f.readlines():
193 item = line.rstrip('\n').split(': ', 1)
194 key = item[0]
195 if key.endswith('_' + pkg):
196 key = key[:-len(pkg)-1]
197 pkgdata[key] = item[1].decode('utf-8').decode('string_escape')
198
199 pkge = pkgdata.get('PKGE', '0')
200 pkgv = pkgdata['PKGV']
201 pkgr = pkgdata['PKGR']
202 #
203 # Find out what the last version was
204 # Make sure the version did not decrease
205 #
206 lastversion = getlastpkgversion(pkg)
207 if lastversion:
208 last_pkge = lastversion.pkge
209 last_pkgv = lastversion.pkgv
210 last_pkgr = lastversion.pkgr
211 r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
212 if r < 0:
213                msg = "Package version for package %s went backwards, which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
214 package_qa_handle_error("version-going-backwards", msg, d)
215
216 pkginfo = PackageInfo(pkg)
217 # Apparently the version can be different on a per-package basis (see Python)
218 pkginfo.pe = pkgdata.get('PE', '0')
219 pkginfo.pv = pkgdata['PV']
220 pkginfo.pr = pkgdata['PR']
221 pkginfo.pkg = pkgdata['PKG']
222 pkginfo.pkge = pkge
223 pkginfo.pkgv = pkgv
224 pkginfo.pkgr = pkgr
225 pkginfo.rprovides = sortpkglist(squashspaces(pkgdata.get('RPROVIDES', "")))
226 pkginfo.rdepends = sortpkglist(squashspaces(pkgdata.get('RDEPENDS', "")))
227 pkginfo.rrecommends = sortpkglist(squashspaces(pkgdata.get('RRECOMMENDS', "")))
228 pkginfo.rsuggests = sortpkglist(squashspaces(pkgdata.get('RSUGGESTS', "")))
229 pkginfo.rreplaces = sortpkglist(squashspaces(pkgdata.get('RREPLACES', "")))
230 pkginfo.rconflicts = sortpkglist(squashspaces(pkgdata.get('RCONFLICTS', "")))
231 pkginfo.files = squashspaces(pkgdata.get('FILES', ""))
232 for filevar in pkginfo.filevars:
233 pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
234
235 # Gather information about packaged files
236 val = pkgdata.get('FILES_INFO', '')
237 dictval = json.loads(val)
238 filelist = dictval.keys()
239 filelist.sort()
240 pkginfo.filelist = " ".join(filelist)
241
242 pkginfo.size = int(pkgdata['PKGSIZE'])
243
244 write_pkghistory(pkginfo, d)
245}
246
247
248def write_recipehistory(rcpinfo, d):
249 import codecs
250
251 bb.debug(2, "Writing recipe history")
252
253 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
254
255 infofile = os.path.join(pkghistdir, "latest")
256 with codecs.open(infofile, "w", encoding='utf8') as f:
257 if rcpinfo.pe != "0":
258 f.write(u"PE = %s\n" % rcpinfo.pe)
259 f.write(u"PV = %s\n" % rcpinfo.pv)
260 f.write(u"PR = %s\n" % rcpinfo.pr)
261 f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
262 f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
263
264
265def write_pkghistory(pkginfo, d):
266 import codecs
267
268 bb.debug(2, "Writing package history for package %s" % pkginfo.name)
269
270 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
271
272 pkgpath = os.path.join(pkghistdir, pkginfo.name)
273 if not os.path.exists(pkgpath):
274 bb.utils.mkdirhier(pkgpath)
275
276 infofile = os.path.join(pkgpath, "latest")
277 with codecs.open(infofile, "w", encoding='utf8') as f:
278 if pkginfo.pe != "0":
279 f.write(u"PE = %s\n" % pkginfo.pe)
280 f.write(u"PV = %s\n" % pkginfo.pv)
281 f.write(u"PR = %s\n" % pkginfo.pr)
282
283 pkgvars = {}
284 pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else ''
285 pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else ''
286 pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else ''
287 pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else ''
288 for pkgvar in pkgvars:
289 val = pkgvars[pkgvar]
290 if val:
291 f.write(u"%s = %s\n" % (pkgvar, val))
292
293 f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides)
294 f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends)
295 f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends)
296 if pkginfo.rsuggests:
297 f.write(u"RSUGGESTS = %s\n" % pkginfo.rsuggests)
298 if pkginfo.rreplaces:
299 f.write(u"RREPLACES = %s\n" % pkginfo.rreplaces)
300 if pkginfo.rconflicts:
301 f.write(u"RCONFLICTS = %s\n" % pkginfo.rconflicts)
302 f.write(u"PKGSIZE = %d\n" % pkginfo.size)
303 f.write(u"FILES = %s\n" % pkginfo.files)
304 f.write(u"FILELIST = %s\n" % pkginfo.filelist)
305
306 for filevar in pkginfo.filevars:
307 filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
308 val = pkginfo.filevars[filevar]
309 if val:
310 with codecs.open(filevarpath, "w", encoding='utf8') as f:
311 f.write(val)
312 else:
313 if os.path.exists(filevarpath):
314 os.unlink(filevarpath)
315
316#
317# rootfs_type can be: image, sdk_target, sdk_host
318#
319def buildhistory_list_installed(d, rootfs_type="image"):
320 from oe.rootfs import image_list_installed_packages
321 from oe.sdk import sdk_list_installed_packages
322
323 process_list = [('file', 'bh_installed_pkgs.txt'),\
324 ('deps', 'bh_installed_pkgs_deps.txt')]
325
326 for output_type, output_file in process_list:
327 output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
328
329 with open(output_file_full, 'w') as output:
330 if rootfs_type == "image":
331 output.write(image_list_installed_packages(d, output_type))
332 else:
333 output.write(sdk_list_installed_packages(d, rootfs_type == "sdk_target", output_type))
334
335python buildhistory_list_installed_image() {
336 buildhistory_list_installed(d)
337}
338
339python buildhistory_list_installed_sdk_target() {
340 buildhistory_list_installed(d, "sdk_target")
341}
342
343python buildhistory_list_installed_sdk_host() {
344 buildhistory_list_installed(d, "sdk_host")
345}
346
347buildhistory_get_installed() {
348 mkdir -p $1
349
350 # Get list of installed packages
351 pkgcache="$1/installed-packages.tmp"
352 cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
353
354 cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
355 if [ -s $pkgcache ] ; then
356 cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
357 else
358 printf "" > $1/installed-packages.txt
359 fi
360
361 # Produce dependency graph
362 # First, quote each name to handle characters that cause issues for dot
363 cat ${WORKDIR}/bh_installed_pkgs_deps.txt | sed 's:\([^| ]*\):"\1":g' > $1/depends.tmp && \
364 rm ${WORKDIR}/bh_installed_pkgs_deps.txt
365 # Change delimiter from pipe to -> and set style for recommend lines
366 sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
367 # Add header, sorted and de-duped contents and footer and then delete the temp file
368 printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
369 cat $1/depends.tmp | sort | uniq >> $1/depends.dot
370 echo "}" >> $1/depends.dot
371 rm $1/depends.tmp
372
373 # Produce installed package sizes list
374 printf "" > $1/installed-package-sizes.tmp
375 cat $pkgcache | while read pkg pkgfile pkgarch
376 do
377 size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
378 if [ "$size" != "" ] ; then
379 echo "$size $pkg" >> $1/installed-package-sizes.tmp
380 fi
381 done
382 cat $1/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > $1/installed-package-sizes.txt
383 rm $1/installed-package-sizes.tmp
384
385 # We're now done with the cache, delete it
386 rm $pkgcache
387
388 if [ "$2" != "sdk" ] ; then
389 # Produce some cut-down graphs (for readability)
390 grep -v kernel_image $1/depends.dot | grep -v kernel-2 | grep -v kernel-3 > $1/depends-nokernel.dot
391 grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
392 grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
393 grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
394 fi
395
396 # add complementary package information
397 if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then
398 cp ${WORKDIR}/complementary_pkgs.txt $1
399 fi
400}
401
402buildhistory_get_image_installed() {
403 # Anything requiring the use of the packaging system should be done in here
404 # in case the packaging files are going to be removed for this image
405
406 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
407 return
408 fi
409
410 buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE}
411}
412
413buildhistory_get_sdk_installed() {
414 # Anything requiring the use of the packaging system should be done in here
415 # in case the packaging files are going to be removed for this SDK
416
417 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
418 return
419 fi
420
421 buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
422}
423
424buildhistory_get_sdk_installed_host() {
425 buildhistory_get_sdk_installed host
426}
427
428buildhistory_get_sdk_installed_target() {
429 buildhistory_get_sdk_installed target
430}
431
432buildhistory_list_files() {
433 # List the files in the specified directory, but exclude date/time etc.
434	# The find/sed pipeline is somewhat messy, but handles the case where the size is not printed for device files under pseudo
435 ( cd $1 && find . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
436}
437
438
439buildhistory_get_imageinfo() {
440 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
441 return
442 fi
443
444 buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt
445
446 # Collect files requested in BUILDHISTORY_IMAGE_FILES
447 rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files
448 for f in ${BUILDHISTORY_IMAGE_FILES}; do
449 if [ -f ${IMAGE_ROOTFS}/$f ] ; then
450 mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f`
451 cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f
452 fi
453 done
454
455 # Record some machine-readable meta-information about the image
456 printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
457 cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
458${@buildhistory_get_imagevars(d)}
459END
460 imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
461 echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
462
463 # Add some configuration information
464 echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id
465
466 cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id <<END
467${@buildhistory_get_layers(d)}
468END
469}
470
471buildhistory_get_sdkinfo() {
472 if [ "${@base_contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
473 return
474 fi
475
476 buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt
477
478 # Record some machine-readable meta-information about the SDK
479 printf "" > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
480 cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
481${@buildhistory_get_sdkvars(d)}
482END
483 sdksize=`du -ks ${SDK_OUTPUT} | awk '{ print $1 }'`
484 echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
485}
486
487# By prepending we get in before the removal of packaging files
488ROOTFS_POSTPROCESS_COMMAND =+ " buildhistory_list_installed_image ;\
489 buildhistory_get_image_installed ; "
490
491IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
492
493# We want these to be the last run so that we get called after complementary package installation
494POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target ;\
495 buildhistory_get_sdk_installed_target ; "
496POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;\
497 buildhistory_get_sdk_installed_host ; "
498
499SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; "
500
501def buildhistory_get_layers(d):
502 if d.getVar('BB_WORKERCONTEXT', True) != '1':
503 return ""
504 layertext = "Configured metadata layers:\n%s\n" % '\n'.join(get_layers_branch_rev(d))
505 return layertext
506
507def buildhistory_get_metadata_revs(d):
508 # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
509 layers = (d.getVar("BBLAYERS", True) or "").split()
510    metadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
511        base_get_metadata_git_branch(i, None).strip(), \
512        base_get_metadata_git_revision(i, None)) \
513            for i in layers]
514    return '\n'.join(metadata_revs)
515
516
517def squashspaces(string):
518 import re
519 return re.sub("\s+", " ", string).strip()
520
521def outputvars(vars, listvars, d):
522 vars = vars.split()
523 listvars = listvars.split()
524 ret = ""
525 for var in vars:
526 value = d.getVar(var, True) or ""
527 if var in listvars:
528 # Squash out spaces
529 value = squashspaces(value)
530 ret += "%s = %s\n" % (var, value)
531 return ret.rstrip('\n')
532
533def buildhistory_get_imagevars(d):
534 if d.getVar('BB_WORKERCONTEXT', True) != '1':
535 return ""
536 imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
537 listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
538 return outputvars(imagevars, listvars, d)
539
540def buildhistory_get_sdkvars(d):
541 if d.getVar('BB_WORKERCONTEXT', True) != '1':
542 return ""
543 sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
544 listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
545 return outputvars(sdkvars, listvars, d)
546
547
548def buildhistory_get_cmdline(d):
549 if sys.argv[0].endswith('bin/bitbake'):
550 bincmd = 'bitbake'
551 else:
552 bincmd = sys.argv[0]
553 return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
554
555
556buildhistory_commit() {
557 if [ ! -d ${BUILDHISTORY_DIR} ] ; then
558		# The code above that creates this dir was never executed, so there can't be anything to commit
559 return
560 fi
561
562 # Create a machine-readable list of metadata revisions for each layer
563 cat > ${BUILDHISTORY_DIR}/metadata-revs <<END
564${@buildhistory_get_metadata_revs(d)}
565END
566
567 ( cd ${BUILDHISTORY_DIR}/
568 # Initialise the repo if necessary
569 if [ ! -d .git ] ; then
570 git init -q
571 else
572 git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
573 git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
574 git tag -f build-minus-1 > /dev/null 2>&1 || true
575 fi
576 # Check if there are new/changed files to commit (other than metadata-revs)
577 repostatus=`git status --porcelain | grep -v " metadata-revs$"`
578 HOSTNAME=`hostname 2>/dev/null || echo unknown`
579 CMDLINE="${@buildhistory_get_cmdline(d)}"
580 if [ "$repostatus" != "" ] ; then
581 git add -A .
582 # porcelain output looks like "?? packages/foo/bar"
583 # Ensure we commit metadata-revs with the first commit
584 for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
585 git commit $entry metadata-revs -m "$entry: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
586 done
587 git gc --auto --quiet
588 if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
589 git push -q ${BUILDHISTORY_PUSH_REPO}
590 fi
591 else
592 git commit ${BUILDHISTORY_DIR}/ --allow-empty -m "No changes: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
593 fi) || true
594}
595
596python buildhistory_eventhandler() {
597 if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
598 if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
599 bb.note("Writing buildhistory")
600 bb.build.exec_func("buildhistory_commit", e.data)
601}
602
603addhandler buildhistory_eventhandler
604buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted"
605
606
607# FIXME this ought to be moved into the fetcher
608def _get_srcrev_values(d):
609 """
610 Return the version strings for the current recipe
611 """
612
613 scms = []
614 fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
615 urldata = fetcher.ud
616 for u in urldata:
617 if urldata[u].method.supports_srcrev():
618 scms.append(u)
619
620 autoinc_templ = 'AUTOINC+'
621 dict_srcrevs = {}
622 dict_tag_srcrevs = {}
623 for scm in scms:
624 ud = urldata[scm]
625 for name in ud.names:
626 try:
627 rev = ud.method.sortable_revision(ud, d, name)
628 except TypeError:
629 # support old bitbake versions
630 rev = ud.method.sortable_revision(scm, ud, d, name)
631 # Clean this up when we next bump bitbake version
632 if type(rev) != str:
633 autoinc, rev = rev
634 elif rev.startswith(autoinc_templ):
635 rev = rev[len(autoinc_templ):]
636 dict_srcrevs[name] = rev
637 if 'tag' in ud.parm:
638                tag = ud.parm['tag']
639 key = name+'_'+tag
640 dict_tag_srcrevs[key] = rev
641 return (dict_srcrevs, dict_tag_srcrevs)
642
643do_fetch[postfuncs] += "write_srcrev"
644do_fetch[vardepsexclude] += "write_srcrev"
645python write_srcrev() {
646 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
647 srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
648
649 srcrevs, tag_srcrevs = _get_srcrev_values(d)
650 if srcrevs:
651 if not os.path.exists(pkghistdir):
652 bb.utils.mkdirhier(pkghistdir)
653 old_tag_srcrevs = {}
654 if os.path.exists(srcrevfile):
655 with open(srcrevfile) as f:
656 for line in f:
657 if line.startswith('# tag_'):
658 key, value = line.split("=", 1)
659 key = key.replace('# tag_', '').strip()
660 value = value.replace('"', '').strip()
661 old_tag_srcrevs[key] = value
662 with open(srcrevfile, 'w') as f:
663 orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
664 if orig_srcrev != 'INVALID':
665 f.write('# SRCREV = "%s"\n' % orig_srcrev)
666 if len(srcrevs) > 1:
667 for name, srcrev in srcrevs.items():
668 orig_srcrev = d.getVar('SRCREV_%s' % name, False)
669 if orig_srcrev:
670 f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
671 f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
672 else:
673 f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next())
674 if len(tag_srcrevs) > 0:
675 for name, srcrev in tag_srcrevs.items():
676 f.write('# tag_%s = "%s"\n' % (name, srcrev))
677 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
678 pkg = d.getVar('PN', True)
679 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
680
681 else:
682 if os.path.exists(srcrevfile):
683 os.remove(srcrevfile)
684}
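Typical activation, e.g. in local.conf; the push repository is optional and the URL below is hypothetical:

    INHERIT += "buildhistory"
    BUILDHISTORY_COMMIT = "1"
    BUILDHISTORY_PUSH_REPO = "ssh://git@example.com/buildhistory.git"

Each build then appends to the git repository under ${BUILDHISTORY_DIR} (TOPDIR/buildhistory by default), retagging the previous builds as build-minus-1/2/3 as seen in buildhistory_commit() above.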
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
new file mode 100644
index 0000000000..89ae72c679
--- /dev/null
+++ b/meta/classes/buildstats.bbclass
@@ -0,0 +1,289 @@
1BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
2BNFILE = "${BUILDSTATS_BASE}/.buildname"
3DEVFILE = "${BUILDSTATS_BASE}/.device"
4
5################################################################################
6# Build statistics gathering.
7#
8# The CPU and Time gathering/tracking functions and bbevent inspiration
9# were written by Christopher Larson and can be seen here:
10# http://kergoth.pastey.net/142813
11#
12################################################################################
13
14def get_process_cputime(pid):
15 with open("/proc/%d/stat" % pid, "r") as f:
16 fields = f.readline().rstrip().split()
17 # 13: utime, 14: stime, 15: cutime, 16: cstime
18 return sum(int(field) for field in fields[13:16])
19
20def get_cputime():
21 with open("/proc/stat", "r") as f:
22 fields = f.readline().rstrip().split()[1:]
23 return sum(int(field) for field in fields)
24
25def set_bn(e):
26 bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
27 try:
28 os.remove(e.data.getVar('BNFILE', True))
29 except:
30 pass
31 with open(e.data.getVar('BNFILE', True), "w") as f:
32 f.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
33
34def get_bn(e):
35 with open(e.data.getVar('BNFILE', True)) as f:
36 bn = f.readline()
37 return bn
38
39def set_device(e):
40 tmpdir = e.data.getVar('TMPDIR', True)
41 try:
42 os.remove(e.data.getVar('DEVFILE', True))
43 except:
44 pass
45 ############################################################################
46 # We look for the volume TMPDIR lives on. To do all disks would make little
47 # sense and not give us any particularly useful data. In theory we could do
48 # something like stick DL_DIR on a different partition and this would
49    # throw stats gathering off. The same goes for SSTATE_DIR. However, let's
50    # get the basics in here and work on the corner cases later.
51    # Note: /proc/diskstats does not contain info on ecryptfs, tmpfs, etc.
52 # If we end up hitting one of these fs, we'll just skip diskstats collection.
53 ############################################################################
54 device=os.stat(tmpdir)
55 majordev=os.major(device.st_dev)
56 minordev=os.minor(device.st_dev)
57 ############################################################################
58 # Bug 1700:
59    # Because tmpfs/ecryptfs/ramfs etc. insert no entry in /proc/diskstats,
60    # we set rdev to NoLogicalDevice and check for it later. If we find NLD
61    # we do not collect diskstats, as the method to collect meaningful statistics
62    # for these fs types requires a bit more research.
63 ############################################################################
64 rdev="NoLogicalDevice"
65 try:
66 with open("/proc/diskstats", "r") as f:
67 for line in f:
68 if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
69 rdev=line.split()[2]
70 except:
71 pass
72 file = open(e.data.getVar('DEVFILE', True), "w")
73 file.write(rdev)
74 file.close()
75
76def get_device(e):
77 file = open(e.data.getVar('DEVFILE', True))
78 device = file.readline()
79 file.close()
80 return device
81
82def get_diskstats(dev):
83 import itertools
84 ############################################################################
85 # For info on what these are, see kernel doc file iostats.txt
86 ############################################################################
87 DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
88 try:
89 with open("/proc/diskstats", "r") as f:
90 for x in f:
91 if dev in x:
92 diskstats_val = x.rstrip().split()[4:]
93 except IOError as e:
94 return
95 diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val))
96 return diskstats
97
98def set_diskdata(var, dev, data):
99 data.setVar(var, get_diskstats(dev))
100
101def get_diskdata(var, dev, data):
102 olddiskdata = data.getVar(var, False)
103 diskdata = {}
104 if olddiskdata is None:
105 return
106 newdiskdata = get_diskstats(dev)
107 for key in olddiskdata.iterkeys():
108 diskdata["Start"+key] = str(int(olddiskdata[key]))
109 diskdata["End"+key] = str(int(newdiskdata[key]))
110 return diskdata
111
112def set_timedata(var, data, server_time=None):
113 import time
114 if server_time:
115 time = server_time
116 else:
117 time = time.time()
118 cputime = get_cputime()
119 proctime = get_process_cputime(os.getpid())
120 data.setVar(var, (time, cputime, proctime))
121
122def get_timedata(var, data, server_time=None):
123 import time
124 timedata = data.getVar(var, False)
125 if timedata is None:
126 return
127 oldtime, oldcpu, oldproc = timedata
128 procdiff = get_process_cputime(os.getpid()) - oldproc
129 cpudiff = get_cputime() - oldcpu
130 if server_time:
131 end_time = server_time
132 else:
133 end_time = time.time()
134 timediff = end_time - oldtime
135 if cpudiff > 0:
136 cpuperc = float(procdiff) * 100 / cpudiff
137 else:
138 cpuperc = None
139 return timediff, cpuperc
140
141def write_task_data(status, logfile, dev, e):
142 bn = get_bn(e)
143 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
144 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
145    file = open(logfile, "a")
146 timedata = get_timedata("__timedata_task", e.data, e.time)
147 if timedata:
148 elapsedtime, cpu = timedata
149 file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
150 (e.task, elapsedtime), e.data))
151 if cpu:
152 file.write("CPU usage: %0.1f%% \n" % cpu)
153 ############################################################################
154 # Here we gather up disk data. In an effort to avoid lying with stats
155 # I do a bare minimum of analysis of collected data.
156 # The simple fact is, doing disk io collection on a per process basis
157    # without affecting build time would be difficult.
158 # For the best information, running things with BB_TOTAL_THREADS = "1"
159 # would return accurate per task results.
160 ############################################################################
161 if dev != "NoLogicalDevice":
162 diskdata = get_diskdata("__diskdata_task", dev, e.data)
163 if diskdata:
164 for key in sorted(diskdata.iterkeys()):
165 file.write(key + ": " + diskdata[key] + "\n")
166    if status == "passed":
167 file.write("Status: PASSED \n")
168 else:
169 file.write("Status: FAILED \n")
170 file.write("Ended: %0.2f \n" % e.time)
171 file.close()
172
173python run_buildstats () {
174 import bb.build
175 import bb.event
176 import bb.data
177 import time, subprocess, platform
178
179 if isinstance(e, bb.event.BuildStarted):
180 ########################################################################
181        # on the first pass, create the buildstats hierarchy and then
182        # set the buildname
183 ########################################################################
184 try:
185 bb.utils.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
186 except:
187 pass
188 set_bn(e)
189 bn = get_bn(e)
190 set_device(e)
191 device = get_device(e)
192
193 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
194 try:
195 bb.utils.mkdirhier(bsdir)
196 except:
197 pass
198 if device != "NoLogicalDevice":
199 set_diskdata("__diskdata_build", device, e.data)
200 set_timedata("__timedata_build", e.data)
201 build_time = os.path.join(bsdir, "build_stats")
202 # write start of build into build_time
203 file = open(build_time,"a")
204 host_info = platform.uname()
205 file.write("Host Info: ")
206 for x in host_info:
207 if x:
208 file.write(x + " ")
209 file.write("\n")
210 file.write("Build Started: %0.2f \n" % time.time())
211 file.close()
212
213 elif isinstance(e, bb.event.BuildCompleted):
214 bn = get_bn(e)
215 device = get_device(e)
216 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
217 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
218 build_time = os.path.join(bsdir, "build_stats")
219 file = open(build_time, "a")
220 ########################################################################
221 # Write build statistics for the build
222 ########################################################################
223 timedata = get_timedata("__timedata_build", e.data)
224 if timedata:
225            elapsed, cpu = timedata
226            # write end of build and cpu used into build_time
227            file.write("Elapsed time: %0.2f seconds \n" % elapsed)
228 if cpu:
229 file.write("CPU usage: %0.1f%% \n" % cpu)
230 if device != "NoLogicalDevice":
231 diskio = get_diskdata("__diskdata_build", device, e.data)
232 if diskio:
233 for key in sorted(diskio.iterkeys()):
234 file.write(key + ": " + diskio[key] + "\n")
235 file.close()
236
237 if isinstance(e, bb.build.TaskStarted):
238 bn = get_bn(e)
239 device = get_device(e)
240 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
241 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
242 if device != "NoLogicalDevice":
243 set_diskdata("__diskdata_task", device, e.data)
244 set_timedata("__timedata_task", e.data, e.time)
245 try:
246 bb.utils.mkdirhier(taskdir)
247 except:
248 pass
249 # write into the task event file the name and start time
250 file = open(os.path.join(taskdir, e.task), "a")
251 file.write("Event: %s \n" % bb.event.getName(e))
252 file.write("Started: %0.2f \n" % e.time)
253 file.close()
254
255 elif isinstance(e, bb.build.TaskSucceeded):
256 bn = get_bn(e)
257 device = get_device(e)
258 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
259 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
260 write_task_data("passed", os.path.join(taskdir, e.task), device, e)
261 if e.task == "do_rootfs":
262 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
263 bs=os.path.join(bsdir, "build_stats")
264 file = open(bs,"a")
265 rootfs = e.data.getVar('IMAGE_ROOTFS', True)
266 rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
267 file.write("Uncompressed Rootfs size: %s" % rootfs_size)
268 file.close()
269
270 elif isinstance(e, bb.build.TaskFailed):
271 bn = get_bn(e)
272 device = get_device(e)
273 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
274 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
275 write_task_data("failed", os.path.join(taskdir, e.task), device, e)
276 ########################################################################
277        # Let's make things easier and tell people where the build failed in
278        # build_status. We do this here because BuildCompleted triggers no
279        # matter what the actual status of the build is
280 ########################################################################
281 build_status = os.path.join(bsdir, "build_stats")
282 file = open(build_status,"a")
283 file.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task))
284 file.close()
285}
286
287addhandler run_buildstats
288run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
289
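
The per-task CPU percentage written above is the process CPU-time delta divided by the system-wide CPU-time delta over the same interval. A minimal standalone sketch of that calculation, using hypothetical /proc-based stand-ins for the get_cputime()/get_process_cputime() helpers defined earlier in this class:

    import os, time

    def get_cputime():
        # Sum of all jiffies fields on the aggregate "cpu" line of /proc/stat.
        with open("/proc/stat") as f:
            fields = f.readline().split()[1:]
        return sum(int(x) for x in fields)

    def get_process_cputime(pid):
        # utime + stime (fields 14 and 15, 1-based) from /proc/<pid>/stat.
        with open("/proc/%d/stat" % pid) as f:
            fields = f.readline().split()
        return int(fields[13]) + int(fields[14])

    start = (time.time(), get_cputime(), get_process_cputime(os.getpid()))
    # ... do some work ...
    elapsed = time.time() - start[0]
    cpudiff = get_cputime() - start[1]
    procdiff = get_process_cputime(os.getpid()) - start[2]
    cpuperc = float(procdiff) * 100 / cpudiff if cpudiff > 0 else None
    print("Elapsed: %0.2f s, CPU usage: %s%%" % (elapsed, cpuperc))
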
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
new file mode 100644
index 0000000000..2cdce46932
--- /dev/null
+++ b/meta/classes/ccache.bbclass
@@ -0,0 +1,8 @@
1CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
2export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
3CCACHE_DISABLE[unexport] = "1"
4
5do_configure[dirs] =+ "${CCACHE_DIR}"
6do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
7
8do_clean[cleandirs] += "${CCACHE_DIR}"
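
The CCACHE line above leans on Python's short-circuit "and": bb.utils.which() returns a path (truthy) or an empty string, so the inline expression expands to either 'ccache ' or ''. A sketch of the same trick, with a hypothetical stand-in for bb.utils.which():

    import os

    def which(path, item):
        # Hypothetical stand-in for bb.utils.which(): first hit on a ':'-separated path.
        for p in path.split(":"):
            candidate = os.path.join(p, item)
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate
        return ""

    # "X and Y" evaluates to Y when X is truthy, and to X (here "") when it is falsy.
    cc_prefix = which(os.environ.get("PATH", ""), "ccache") and "ccache "
    print(repr(cc_prefix))  # 'ccache ' if ccache is installed, '' otherwise
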
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
new file mode 100644
index 0000000000..7bdb1b9938
--- /dev/null
+++ b/meta/classes/chrpath.bbclass
@@ -0,0 +1,115 @@
1CHRPATH_BIN ?= "chrpath"
2PREPROCESS_RELOCATE_DIRS ?= ""
3
4def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
5 import subprocess as sub
6
7 p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
8    out, err = p.communicate()
9    # If the command returned successfully, process its stdout for results
10    if p.returncode != 0:
11        return
12
13    # Throw away everything other than the rpath list
14    curr_rpath = out.partition("RPATH=")[2]
15 #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
16 rpaths = curr_rpath.split(":")
17 new_rpaths = []
18 modified = False
19 for rpath in rpaths:
20 # If rpath is already dynamic copy it to new_rpath and continue
21 if rpath.find("$ORIGIN") != -1:
22 new_rpaths.append(rpath.strip())
23 continue
24 rpath = os.path.normpath(rpath)
25 if baseprefix not in rpath and tmpdir not in rpath:
26 new_rpaths.append(rpath.strip())
27 continue
28 new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/"))))
29 modified = True
30
31 # if we have modified some rpaths call chrpath to update the binary
32 if modified:
33 args = ":".join(new_rpaths)
34 #bb.note("Setting rpath for %s to %s" %(fpath, args))
35 p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
36 out, err = p.communicate()
37 if p.returncode != 0:
38 bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
39 raise bb.build.FuncFailed
40
41def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
42 import subprocess as sub
43
44 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
45    out, err = p.communicate()
46    # If the command returned successfully, process its stdout for results
47    if p.returncode != 0:
48        return
49    for l in out.split("\n"):
50 if "(compatibility" not in l:
51 continue
52 rpath = l.partition("(compatibility")[0].strip()
53 if baseprefix not in rpath:
54 continue
55
56 newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
57 bb.warn("%s %s %s %s" % (fpath, rpath, newpath, rootdir))
58 p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
59        out, err = p.communicate()
60
61def process_dir (rootdir, directory, d):
62 import stat
63
64 cmd = d.expand('${CHRPATH_BIN}')
65    tmpdir = os.path.normpath(d.getVar('TMPDIR', True))
66 baseprefix = os.path.normpath(d.expand('${base_prefix}'))
67 hostos = d.getVar("HOST_OS", True)
68
69 #bb.debug("Checking %s for binaries to process" % directory)
70 if not os.path.exists(directory):
71 return
72
73 if "linux" in hostos:
74 process_file = process_file_linux
75 elif "darwin" in hostos:
76 process_file = process_file_darwin
77 else:
78 # Relocations not supported
79 return
80
81 dirs = os.listdir(directory)
82 for file in dirs:
83 fpath = directory + "/" + file
84 fpath = os.path.normpath(fpath)
85 if os.path.islink(fpath):
86 # Skip symlinks
87 continue
88
89 if os.path.isdir(fpath):
90 process_dir(rootdir, fpath, d)
91 else:
92 #bb.note("Testing %s for relocatability" % fpath)
93
94 # We need read and write permissions for chrpath, if we don't have
95 # them then set them temporarily. Take a copy of the files
96 # permissions so that we can restore them afterwards.
97 perms = os.stat(fpath)[stat.ST_MODE]
98 if os.access(fpath, os.W_OK|os.R_OK):
99 perms = None
100 else:
101 # Temporarily make the file writeable so we can chrpath it
102 os.chmod(fpath, perms|stat.S_IRWXU)
103 process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
104
105 if perms:
106 os.chmod(fpath, perms)
107
108def rpath_replace (path, d):
109 bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
110
111 for bindir in bindirs:
112 #bb.note ("Processing directory " + bindir)
113 directory = path + "/" + bindir
114 process_dir (path, directory, d)
115
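
The heart of the Linux rewrite above is turning an absolute RPATH into one relative to the binary's own directory via $ORIGIN. Stripped of the staging-path mangling, the computation reduces to this sketch:

    import os.path

    # A binary installed as /usr/bin/tool whose RPATH pointed at /usr/lib:
    rpath, bindir = "/usr/lib", "/usr/bin"
    print("$ORIGIN/" + os.path.relpath(rpath, bindir))  # -> $ORIGIN/../lib
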
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
new file mode 100644
index 0000000000..167407dfdc
--- /dev/null
+++ b/meta/classes/clutter.bbclass
@@ -0,0 +1,22 @@
1
2def get_minor_dir(v):
3 import re
4 m = re.match("^([0-9]+)\.([0-9]+)", v)
5 return "%s.%s" % (m.group(1), m.group(2))
6
7def get_real_name(n):
8 import re
9 m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
10 return "%s" % (m.group(1))
11
12VERMINOR = "${@get_minor_dir("${PV}")}"
13REALNAME = "${@get_real_name("${BPN}")}"
14
15CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
16
17CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}"
18
19SRC_URI = "${CLUTTER_SRC_FTP}"
20S = "${WORKDIR}/${REALNAME}-${PV}"
21
22inherit autotools pkgconfig gtk-doc gettext
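
A quick standalone check of the two helpers above, on representative values that follow the patterns the regexes expect:

    import re

    def get_minor_dir(v):
        m = re.match(r"^([0-9]+)\.([0-9]+)", v)
        return "%s.%s" % (m.group(1), m.group(2))

    def get_real_name(n):
        m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
        return m.group(1)

    print(get_minor_dir("1.14.4"))           # -> 1.14 (mirror subdirectory)
    print(get_real_name("clutter-gtk-1.0"))  # -> clutter-gtk
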
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
new file mode 100644
index 0000000000..c9c15f3076
--- /dev/null
+++ b/meta/classes/cmake.bbclass
@@ -0,0 +1,115 @@
1DEPENDS_prepend = "cmake-native "
2B = "${WORKDIR}/build"
3
4# We need to unset CCACHE otherwise cmake gets too confused
5CCACHE = ""
6
7# We want the staging and installing functions from autotools
8inherit autotools
9
10# C/C++ Compiler (without cpu arch/tune arguments)
11OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
12OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
13
14# Compiler flags
15OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
16OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} -fpermissive"
17OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG"
18OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
19OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
20OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
21
22OECMAKE_RPATH ?= ""
23OECMAKE_PERLNATIVE_DIR ??= ""
24OECMAKE_EXTRA_ROOT_PATH ?= ""
25
26cmake_do_generate_toolchain_file() {
27 cat > ${WORKDIR}/toolchain.cmake <<EOF
28# CMake system name must be something like "Linux".
29# This is important for cross-compiling.
30set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
31set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
32set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
33set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
34set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
35set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
36set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
37set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
38set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
39set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
40
41# only search in the paths provided so cmake doesn't pick
42# up libraries and tools from the native build machine
43set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
44set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
45set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
46set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
47set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
48
49# Use qt.conf settings
50set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
51
52# We need to set the rpath to the correct directory as cmake does not provide any
53# directory as rpath by default
54set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
55
56# Use native cmake modules
57set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
58
59# add for non /usr/lib libdir, e.g. /usr/lib64
60set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
61
62EOF
63}
64
65addtask generate_toolchain_file after do_patch before do_configure
66
67cmake_do_configure() {
68 if [ "${OECMAKE_BUILDPATH}" -o "${OECMAKE_SOURCEPATH}" ]; then
69 bbnote "cmake.bbclass no longer uses OECMAKE_SOURCEPATH and OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
70 fi
71
72 if [ "${S}" != "${B}" ]; then
73 rm -rf ${B}
74 mkdir -p ${B}
75 cd ${B}
76 fi
77
78    # Just like autotools, cmake can use a site file to cache results that need generated binaries to run
79 if [ -e ${WORKDIR}/site-file.cmake ] ; then
80 OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
81 else
82 OECMAKE_SITEFILE=""
83 fi
84
85 cmake \
86 ${OECMAKE_SITEFILE} \
87 ${S} \
88 -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
89 -DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
90 -DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
91 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${libexecdir} \
92 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
93 -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${sharedstatedir} \
94 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
95 -DCMAKE_INSTALL_LIBDIR:PATH=${libdir} \
96 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${includedir} \
97 -DCMAKE_INSTALL_DATAROOTDIR:PATH=${datadir} \
98 -DCMAKE_INSTALL_SO_NO_EXE=0 \
99 -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
100 -DCMAKE_VERBOSE_MAKEFILE=1 \
101 ${EXTRA_OECMAKE} \
102 -Wno-dev
103}
104
105cmake_do_compile() {
106 cd ${B}
107 base_do_compile
108}
109
110cmake_do_install() {
111 cd ${B}
112 autotools_do_install
113}
114
115EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
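
The CMAKE_SYSTEM_NAME line in the generated toolchain file capitalizes the first letter of TARGET_OS and collapses any Linux variant to plain "Linux". The two sed expressions are equivalent to this Python sketch:

    import re

    def cmake_system_name(target_os):
        s = target_os[:1].upper() + target_os[1:]
        return re.sub(r"^(Linux).*", r"\1", s)

    print(cmake_system_name("linux-gnueabi"))  # -> Linux
    print(cmake_system_name("mingw32"))        # -> Mingw32
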
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
new file mode 100644
index 0000000000..34c0c4e6c7
--- /dev/null
+++ b/meta/classes/cml1.bbclass
@@ -0,0 +1,73 @@
1cml1_do_configure() {
2 set -e
3 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
4 oe_runmake oldconfig
5}
6
7EXPORT_FUNCTIONS do_configure
8addtask configure after do_unpack do_patch before do_compile
9
10inherit terminal
11
12OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS HOST_LOADLIBES TERMINFO"
13HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
14HOSTLDFLAGS = "${BUILD_LDFLAGS}"
15HOST_LOADLIBES = "-lncurses"
16TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
17
18python do_menuconfig() {
19 import shutil
20
21 try:
22 mtime = os.path.getmtime(".config")
23 shutil.copy(".config", ".config.orig")
24 except OSError:
25 mtime = 0
26
27 oe_terminal("${SHELL} -c \"make menuconfig; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"", '${PN} Configuration', d)
28
29 # FIXME this check can be removed when the minimum bitbake version has been bumped
30 if hasattr(bb.build, 'write_taint'):
31 try:
32 newmtime = os.path.getmtime(".config")
33 except OSError:
34 newmtime = 0
35
36 if newmtime > mtime:
37 bb.note("Configuration changed, recompile will be forced")
38 bb.build.write_taint('do_compile', d)
39}
40do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
41do_menuconfig[nostamp] = "1"
42addtask menuconfig after do_configure
43
44python do_diffconfig() {
45 import shutil
46 import subprocess
47
48 workdir = d.getVar('WORKDIR', True)
49 fragment = workdir + '/fragment.cfg'
50 configorig = '.config.orig'
51 config = '.config'
52
53 try:
54 md5newconfig = bb.utils.md5_file(configorig)
55 md5config = bb.utils.md5_file(config)
56 isdiff = md5newconfig != md5config
57 except IOError as e:
58        bb.fatal("No config files found. Did you run menuconfig?\n%s" % e)
59
60 if isdiff:
61 statement = 'diff -Nurp ' + configorig + ' ' + config + '| sed -n "s/^\+//p" >' + fragment
62 subprocess.call(statement, shell=True)
63
64 shutil.copy(configorig, config)
65
66 bb.plain("Config fragment has been dumped into:\n %s" % fragment)
67 else:
68 if os.path.exists(fragment):
69 os.unlink(fragment)
70}
71
72do_diffconfig[nostamp] = "1"
73addtask diffconfig
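
do_diffconfig builds the fragment from the added lines of a unified diff between the saved and current .config. A sketch of the same extraction in pure Python with difflib, instead of shelling out to diff and sed:

    import difflib

    def config_fragment(orig_lines, new_lines):
        frag = []
        for line in difflib.unified_diff(orig_lines, new_lines, n=0):
            # Keep added lines, minus the "+" prefix; skip the "+++" header.
            if line.startswith("+") and not line.startswith("+++"):
                frag.append(line[1:])
        return frag

    old = ["CONFIG_A=y\n", "# CONFIG_B is not set\n"]
    new = ["CONFIG_A=y\n", "CONFIG_B=y\n"]
    print("".join(config_fragment(old, new)))  # -> CONFIG_B=y
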
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
new file mode 100644
index 0000000000..907c1836b3
--- /dev/null
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -0,0 +1,64 @@
1# Deploy sources for recipes for compliance with copyleft-style licenses
2# Defaults to using symlinks, as it's a quick operation, and one can easily
3# follow the links when making use of the files (e.g. tar with the -h arg).
4#
5# vi:sts=4:sw=4:et
6
7inherit copyleft_filter
8
9COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
10
11python do_prepare_copyleft_sources () {
12 """Populate a tree of the recipe sources and emit patch series files"""
13 import os.path
14 import shutil
15
16 p = d.getVar('P', True)
17 included, reason = copyleft_should_include(d)
18 if not included:
19 bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
20 return
21 else:
22 bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
23
24 sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
25 dl_dir = d.getVar('DL_DIR', True)
26 src_uri = d.getVar('SRC_URI', True).split()
27 fetch = bb.fetch2.Fetch(src_uri, d)
28 ud = fetch.ud
29
30 pf = d.getVar('PF', True)
31 dest = os.path.join(sources_dir, pf)
32 shutil.rmtree(dest, ignore_errors=True)
33 bb.utils.mkdirhier(dest)
34
35 for u in ud.values():
36 local = os.path.normpath(fetch.localpath(u.url))
37 if local.endswith('.bb'):
38 continue
39 elif local.endswith('/'):
40 local = local[:-1]
41
42 if u.mirrortarball:
43 tarball_path = os.path.join(dl_dir, u.mirrortarball)
44 if os.path.exists(tarball_path):
45 local = tarball_path
46
47 oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
48
49 patches = src_patches(d)
50 for patch in patches:
51 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
52 patchdir = parm.get('patchdir')
53 if patchdir:
54 series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
55 else:
56 series = os.path.join(dest, 'series')
57
58 with open(series, 'a') as s:
59 s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
60}
61
62addtask prepare_copyleft_sources after do_fetch before do_build
63do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
64do_build[recrdeptask] += 'do_prepare_copyleft_sources'
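
The series files written above follow quilt's format: one patch per line with its strip level. A minimal sketch of the emission (the patch names here are made up for illustration):

    patches = [("fix-build.patch", "1"), ("cve-2014-0001.patch", "0")]
    with open("series", "w") as s:
        for name, striplevel in patches:
            s.write("%s -p%s\n" % (name, striplevel))
    # series now reads:
    #   fix-build.patch -p1
    #   cve-2014-0001.patch -p0
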
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
new file mode 100644
index 0000000000..2c1d8f1c90
--- /dev/null
+++ b/meta/classes/copyleft_filter.bbclass
@@ -0,0 +1,62 @@
1# Filter recipes by license: copyleft_should_include returns True for
2# recipes whose licenses match COPYLEFT_LICENSE_INCLUDE, and False for
3# those matching COPYLEFT_LICENSE_EXCLUDE.
4#
5# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
6#
7# vi:sts=4:sw=4:et
8
9COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
10COPYLEFT_LICENSE_INCLUDE[type] = 'list'
11COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
12
13COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
14COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
15COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
16
17COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
18COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
19
20COPYLEFT_RECIPE_TYPES ?= 'target'
21COPYLEFT_RECIPE_TYPES[type] = 'list'
22COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
23
24COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
25COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
26COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
27
28def copyleft_recipe_type(d):
29 for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
30 if oe.utils.inherits(d, recipe_type):
31 return recipe_type
32 return 'target'
33
34def copyleft_should_include(d):
35 """
36 Determine if this recipe's sources should be deployed for compliance
37 """
38 import ast
39 import oe.license
40 from fnmatch import fnmatchcase as fnmatch
41
42 recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
43 if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
44 return False, 'recipe type "%s" is excluded' % recipe_type
45
46 include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
47 exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
48
49 try:
50 is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
51 except oe.license.LicenseError as exc:
52 bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
53 else:
54 if is_included:
55 if reason:
56 return True, 'recipe has included licenses: %s' % ', '.join(reason)
57 else:
58 return False, 'recipe does not include a copyleft license'
59 else:
60 return False, 'recipe has excluded licenses: %s' % ', '.join(reason)
61
62
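
A reduced sketch of the include/exclude decision: exclusion wins, then inclusion is checked, with fnmatch-style globs applied to a plain list of licenses (the real oe.license.is_included also parses full LICENSE expressions):

    from fnmatch import fnmatchcase as fnmatch

    def should_include(licenses, include, exclude):
        if any(fnmatch(lic, pat) for lic in licenses for pat in exclude):
            return False
        return any(fnmatch(lic, pat) for lic in licenses for pat in include)

    print(should_include(["GPLv2"], ["GPL*", "LGPL*"], ["CLOSED", "Proprietary"]))       # True
    print(should_include(["Proprietary"], ["GPL*", "LGPL*"], ["CLOSED", "Proprietary"])) # False
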
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
new file mode 100644
index 0000000000..7475d7d8d5
--- /dev/null
+++ b/meta/classes/core-image.bbclass
@@ -0,0 +1,79 @@
1# Common code for generating core reference images
2#
3# Copyright (C) 2007-2011 Linux Foundation
4
5LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d690 \
6 file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
7
8# IMAGE_FEATURES control content of the core reference images
9#
10# By default we install the packagegroup-core-boot and packagegroup-base packages - this gives us
11# a working (console-only) rootfs.
12#
13# Available IMAGE_FEATURES:
14#
15# - x11 - X server
16# - x11-base - X server with minimal environment
17# - x11-sato - OpenedHand Sato environment
18# - tools-debug - debugging tools
19# - eclipse-debug - Eclipse remote debugging support
20# - tools-profile - profiling tools
21# - tools-testapps - tools usable to make some device tests
22# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
23# - nfs-server - NFS server
24# - ssh-server-dropbear - SSH server (dropbear)
25# - ssh-server-openssh - SSH server (openssh)
26# - qt4-pkgs - Qt4/X11 and demo applications
27# - hwcodecs - Install hardware acceleration codecs
28# - package-management - installs package management tools and preserves the package manager database
29# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
30# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
31# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
32# - doc-pkgs - documentation packages for all installed packages in the rootfs
33# - read-only-rootfs - tweaks an image to support read-only rootfs
34#
35FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
36FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
37FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
38FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
39FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
40FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
41FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
42FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
43FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
44FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
45FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
46FEATURE_PACKAGES_qt4-pkgs = "packagegroup-core-qt-demoapps"
47FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
48
49
50# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
51# Including image feature foo would replace the image features bar1 and bar2
52IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
53
54# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
55# An error is raised if image feature foo is included together with bar1 (or bar2)
56
57MACHINE_HWCODECS ??= ""
58
59CORE_IMAGE_BASE_INSTALL = '\
60 packagegroup-core-boot \
61 packagegroup-base-extended \
62 \
63 ${CORE_IMAGE_EXTRA_INSTALL} \
64 '
65
66CORE_IMAGE_EXTRA_INSTALL ?= ""
67
68IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
69
70inherit image
71
72# Create /etc/timestamp during image construction to give a reasonably sane default time setting
73ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
74
75# Zap the root password if debug-tweaks feature is not enabled
76ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_empty_root_password ; ",d)}'
77
78# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
79ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
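
How the pieces fit together: IMAGE_FEATURES are first filtered through IMAGE_FEATURES_REPLACES_*, then each surviving feature pulls in its FEATURE_PACKAGES_* list (the expansion itself lives in image.bbclass). A simplified sketch using values defined above:

    feature_packages = {
        "x11": "packagegroup-core-x11",
        "ssh-server-dropbear": "packagegroup-core-ssh-dropbear",
        "ssh-server-openssh": "packagegroup-core-ssh-openssh",
    }
    replaces = {"ssh-server-openssh": ["ssh-server-dropbear"]}

    features = ["x11", "ssh-server-openssh", "ssh-server-dropbear"]
    for feat, replaced in replaces.items():
        if feat in features:
            features = [f for f in features if f not in replaced]
    print([feature_packages[f] for f in features])
    # -> ['packagegroup-core-x11', 'packagegroup-core-ssh-openssh']
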
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
new file mode 100644
index 0000000000..d9817ba6b6
--- /dev/null
+++ b/meta/classes/cpan-base.bbclass
@@ -0,0 +1,55 @@
1#
2# cpan-base provides various Perl-related information needed for building
3# cpan modules
4#
5FILES_${PN} += "${libdir}/perl ${datadir}/perl"
6
7DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
8RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
9
10PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
11
12# Determine the staged version of perl from the perl configuration file
13# Assign vardepvalue, because otherwise signature is changed before and after
14# perl is built (from None to real version in config.sh).
15get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
16def get_perl_version(d):
17 import re
18 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
19 try:
20 f = open(cfg, 'r')
21 except IOError:
22 return None
23    l = f.readlines()
24    f.close()
25 r = re.compile("^version='(\d*\.\d*\.\d*)'")
26 for s in l:
27 m = r.match(s)
28 if m:
29 return m.group(1)
30 return None
31
32# Determine where the library directories are
33def perl_get_libdirs(d):
34 libdir = d.getVar('libdir', True)
35 if is_target(d) == "no":
36 libdir += '/perl-native'
37 libdir += '/perl'
38 return libdir
39
40def is_target(d):
41 if not bb.data.inherits_class('native', d):
42 return "yes"
43 return "no"
44
45PERLLIBDIRS := "${@perl_get_libdirs(d)}"
46PERLVERSION := "${@get_perl_version(d)}"
47PERLVERSION[vardepvalue] = ""
48
49FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
50 ${PERLLIBDIRS}/auto/*/*/.debug \
51 ${PERLLIBDIRS}/auto/*/*/*/.debug \
52 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/.debug \
53 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/.debug \
54 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/.debug \
55 "
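
A standalone check of the version regex get_perl_version() applies to config.sh (the sample line is made up, matching that file's version='x.y.z' convention):

    import re

    r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
    m = r.match("version='5.14.3'")
    print(m.group(1))  # -> 5.14.3
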
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
new file mode 100644
index 0000000000..7088039fa0
--- /dev/null
+++ b/meta/classes/cpan.bbclass
@@ -0,0 +1,55 @@
1#
2# This is for perl modules that use the old Makefile.PL build system
3#
4inherit cpan-base perlnative
5
6EXTRA_CPANFLAGS ?= ""
7EXTRA_PERLFLAGS ?= ""
8
9# Env var which tells perl if it should use host (no) or target (yes) settings
10export PERLCONFIGTARGET = "${@is_target(d)}"
11
12# Env var which tells perl where the perl include files are
13export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
14export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
15export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
16export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
17
18cpan_do_configure () {
19 export PERL5LIB="${PERL_ARCHLIB}"
20 yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
21
22 # Makefile.PLs can exit with success without generating a
23 # Makefile, e.g. in cases of missing configure time
24 # dependencies. This is considered a best practice by
25 # cpantesters.org. See:
26 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
27 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
28 [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
29
30 if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
31 . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
32 # Use find since there can be a Makefile generated for each Makefile.PL
33 for f in `find -name Makefile.PL`; do
34 f2=`echo $f | sed -e 's/.PL//'`
35 test -f $f2 || continue
36 sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
37 -e 's/perl.real/perl/' \
38 -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
39 $f2
40 done
41 fi
42}
43
44cpan_do_compile () {
45 oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
46}
47
48cpan_do_install () {
49 oe_runmake DESTDIR="${D}" install_vendor
50 for PERLSCRIPT in `grep -rIEl '#!${bindir}/perl-native.*/perl' ${D}`; do
51 sed -i -e 's|^#!${bindir}/perl-native.*/perl|#!/usr/bin/env nativeperl|' $PERLSCRIPT
52 done
53}
54
55EXPORT_FUNCTIONS do_configure do_compile do_install
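
The install-time sed above retargets script shebangs from the build host's perl-native interpreter to the nativeperl wrapper. The rewrite is equivalent to this sketch (the input path is illustrative; the class anchors the pattern on ${bindir}):

    import re

    line = "#!/usr/bin/perl-native/perl -w"
    print(re.sub(r"^#!.*perl-native.*/perl", "#!/usr/bin/env nativeperl", line))
    # -> #!/usr/bin/env nativeperl -w
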
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
new file mode 100644
index 0000000000..5b0ad61b4c
--- /dev/null
+++ b/meta/classes/cpan_build.bbclass
@@ -0,0 +1,50 @@
1#
2# This is for perl modules that use the new Build.PL build system
3#
4inherit cpan-base perlnative
5
6# Env var which tells perl if it should use host (no) or target (yes) settings
7export PERLCONFIGTARGET = "${@is_target(d)}"
8export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
9export LD = "${CCLD}"
10
11#
12# We also need to have built libmodule-build-perl-native for
13# everything except libmodule-build-perl-native itself (which uses
14# this class, but uses itself as the provider of
15# libmodule-build-perl)
16#
17def cpan_build_dep_prepend(d):
18 if d.getVar('CPAN_BUILD_DEPS', True):
19 return ''
20 pn = d.getVar('PN', True)
21 if pn in ['libmodule-build-perl', 'libmodule-build-perl-native']:
22 return ''
23 return 'libmodule-build-perl-native '
24
25DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}"
26
27cpan_build_do_configure () {
28 if [ "${@is_target(d)}" = "yes" ]; then
29 # build for target
30 . ${STAGING_LIBDIR}/perl/config.sh
31 fi
32
33 perl Build.PL --installdirs vendor \
34 --destdir ${D} \
35 --install_path arch="${libdir}/perl" \
36 --install_path script=${bindir} \
37 --install_path bin=${bindir} \
38 --install_path bindoc=${mandir}/man1 \
39 --install_path libdoc=${mandir}/man3
40}
41
42cpan_build_do_compile () {
43 perl Build
44}
45
46cpan_build_do_install () {
47 perl Build install
48}
49
50EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
new file mode 100644
index 0000000000..7181c60d5f
--- /dev/null
+++ b/meta/classes/cross-canadian.bbclass
@@ -0,0 +1,102 @@
1#
2# NOTE - When using this class the user is responsible for ensuring that
3# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
4# is changed, another nativesdk xxx-canadian-cross can be installed
5#
6
7
8# SDK packages are built either explicitly by the user,
9# or indirectly via dependency. No need to be in 'world'.
10EXCLUDE_FROM_WORLD = "1"
11CLASSOVERRIDE = "class-cross-canadian"
12STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
13
14#
15# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
16#
17PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
18python () {
19 archs = d.getVar('PACKAGE_ARCHS', True).split()
20 sdkarchs = []
21 for arch in archs:
22 sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
23 d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
24}
25MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
26
27INHIBIT_DEFAULT_DEPS = "1"
28
29STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
30
31TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
32
33PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
34PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
35
36HOST_ARCH = "${SDK_ARCH}"
37HOST_VENDOR = "${SDK_VENDOR}"
38HOST_OS = "${SDK_OS}"
39HOST_PREFIX = "${SDK_PREFIX}"
40HOST_CC_ARCH = "${SDK_CC_ARCH}"
41HOST_LD_ARCH = "${SDK_LD_ARCH}"
42HOST_AS_ARCH = "${SDK_AS_ARCH}"
43
44#assign DPKG_ARCH
45DPKG_ARCH = "${SDK_ARCH}"
46
47CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
48CFLAGS = "${BUILDSDK_CFLAGS}"
49CXXFLAGS = "${BUILDSDK_CFLAGS}"
50LDFLAGS = "${BUILDSDK_LDFLAGS} \
51 -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
52 -Wl,-rpath,${libdir}/.. "
53
54DEPENDS_GETTEXT = "gettext-native nativesdk-gettext"
55
56#
57# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
58# binaries
59#
60DEPENDS_append = " chrpath-replacement-native"
61EXTRANATIVEPATH += "chrpath-native"
62
63# Path mangling needed by the cross packaging
64# Note that we use := here to ensure that libdir and includedir are
65# target paths.
66target_libdir := "${libdir}"
67target_includedir := "${includedir}"
68target_base_libdir := "${base_libdir}"
69target_prefix := "${prefix}"
70target_exec_prefix := "${exec_prefix}"
71
72# Change to place files in SDKPATH
73base_prefix = "${SDKPATHNATIVE}"
74prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
75exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
76bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
77sbindir = "${bindir}"
78base_bindir = "${bindir}"
79base_sbindir = "${bindir}"
80libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
81libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
82
83FILES_${PN} = "${prefix}"
84FILES_${PN}-dbg += "${prefix}/.debug \
85 ${prefix}/bin/.debug \
86 "
87
88export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
89export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
90
91do_populate_sysroot[stamp-extra-info] = ""
92
93USE_NLS = "${SDKUSE_NLS}"
94
95# We have to use TARGET_ARCH, but we care about the absolute value
96# and not any particular tune that is enabled.
97TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
98
99# If MLPREFIX is set by multilib code, shlibs
100# points to the wrong place so force it
101SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs"
102SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs"
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
new file mode 100644
index 0000000000..bd1448965c
--- /dev/null
+++ b/meta/classes/cross.bbclass
@@ -0,0 +1,80 @@
1inherit relocatable
2
3# Cross packages are built indirectly via dependency,
4# no need for them to be a direct target of 'world'
5EXCLUDE_FROM_WORLD = "1"
6
7CLASSOVERRIDE = "class-cross"
8PACKAGES = ""
9PACKAGES_DYNAMIC = ""
10PACKAGES_DYNAMIC_class-native = ""
11
12HOST_ARCH = "${BUILD_ARCH}"
13HOST_VENDOR = "${BUILD_VENDOR}"
14HOST_OS = "${BUILD_OS}"
15HOST_PREFIX = "${BUILD_PREFIX}"
16HOST_CC_ARCH = "${BUILD_CC_ARCH}"
17HOST_LD_ARCH = "${BUILD_LD_ARCH}"
18HOST_AS_ARCH = "${BUILD_AS_ARCH}"
19
20STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
21
22export PKG_CONFIG_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
23export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
24
25CPPFLAGS = "${BUILD_CPPFLAGS}"
26CFLAGS = "${BUILD_CFLAGS}"
27CXXFLAGS = "${BUILD_CFLAGS}"
28LDFLAGS = "${BUILD_LDFLAGS}"
29LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
30
31TOOLCHAIN_OPTIONS = ""
32
33DEPENDS_GETTEXT = "gettext-native"
34
35# Path mangling needed by the cross packaging
36# Note that we use := here to ensure that libdir and includedir are
37# target paths.
38target_base_prefix := "${base_prefix}"
39target_prefix := "${prefix}"
40target_exec_prefix := "${exec_prefix}"
41target_base_libdir = "${target_base_prefix}/${baselib}"
42target_libdir = "${target_exec_prefix}/${baselib}"
43target_includedir := "${includedir}"
44
45# Overrides for paths
46CROSS_TARGET_SYS_DIR = "${MULTIMACH_TARGET_SYS}"
47prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
48base_prefix = "${STAGING_DIR_NATIVE}"
49exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
50bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
51sbindir = "${bindir}"
52base_bindir = "${bindir}"
53base_sbindir = "${bindir}"
54libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
55libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
56
57do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
58do_populate_sysroot[stamp-extra-info] = ""
59do_packagedata[stamp-extra-info] = ""
60
61python cross_virtclass_handler () {
62 classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
63 if "cross" not in classextend:
64 return
65
66 pn = e.data.getVar("PN", True)
67 if not pn.endswith("-cross"):
68 return
69
70    e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-cross")
71}
72
73addhandler cross_virtclass_handler
74cross_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
75
76do_install () {
77 oe_runmake 'DESTDIR=${D}' install
78}
79
80USE_NLS = "no"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
new file mode 100644
index 0000000000..261a37465e
--- /dev/null
+++ b/meta/classes/crosssdk.bbclass
@@ -0,0 +1,35 @@
1inherit cross
2
3CLASSOVERRIDE = "class-crosssdk"
4PACKAGE_ARCH = "${SDK_ARCH}"
5python () {
6 # set TUNE_PKGARCH to SDK_ARCH
7 d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
8}
9
10STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}"
11STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
12
13TARGET_ARCH = "${SDK_ARCH}"
14TARGET_VENDOR = "${SDK_VENDOR}"
15TARGET_OS = "${SDK_OS}"
16TARGET_PREFIX = "${SDK_PREFIX}"
17TARGET_CC_ARCH = "${SDK_CC_ARCH}"
18TARGET_LD_ARCH = "${SDK_LD_ARCH}"
19TARGET_AS_ARCH = "${SDK_AS_ARCH}"
20TARGET_FPU = ""
21
22target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
23target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
24target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
25target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
26target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
27baselib = "lib"
28
29do_populate_sysroot[stamp-extra-info] = ""
30do_packagedata[stamp-extra-info] = ""
31
32# Need to force this to ensure consistency across architectures
33EXTRA_OECONF_FPU = ""
34
35USE_NLS = "no"
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
new file mode 100644
index 0000000000..d7ea151a5d
--- /dev/null
+++ b/meta/classes/debian.bbclass
@@ -0,0 +1,125 @@
1# Debian package renaming only occurs when a package is built.
2# We therefore have to make sure we build all runtime packages
3# before building the current package, so that the package's runtime
4# dependencies are correct.
5#
6# Custom library package names can be defined setting
7# DEBIANNAME_ + pkgname to the desired name.
8#
9# Better expressed as: ensure all RDEPENDS are packaged before we package.
10# This means we can't have circular RDEPENDS/RRECOMMENDS
11DEBIANRDEP = "do_packagedata"
12do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
13do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
14do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
15do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
16
17python () {
18 if not d.getVar("PACKAGES", True):
19 d.setVar("DEBIANRDEP", "")
20}
21
22python debian_package_name_hook () {
23 import glob, copy, stat, errno, re
24
25 pkgdest = d.getVar('PKGDEST', True)
26 packages = d.getVar('PACKAGES', True)
27 bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
28 lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
29 so_re = re.compile("lib.*\.so")
30
31 def socrunch(s):
32 s = s.lower().replace('_', '-')
33 m = re.match("^(.*)(.)\.so\.(.*)$", s)
34 if m is None:
35 return None
36 if m.group(2) in '0123456789':
37 bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
38 else:
39 bin = m.group(1) + m.group(2) + m.group(3)
40 dev = m.group(1) + m.group(2)
41 return (bin, dev)
42
43 def isexec(path):
44 try:
45 s = os.stat(path)
46 except (os.error, AttributeError):
47 return 0
48 return (s[stat.ST_MODE] & stat.S_IEXEC)
49
50 def auto_libname(packages, orig_pkg):
51 sonames = []
52 has_bins = 0
53 has_libs = 0
54 for file in pkgfiles[orig_pkg]:
55 root = os.path.dirname(file)
56 if bin_re.match(root):
57 has_bins = 1
58 if lib_re.match(root):
59 has_libs = 1
60 if so_re.match(os.path.basename(file)):
61 cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null"
62 fd = os.popen(cmd)
63 lines = fd.readlines()
64 fd.close()
65 for l in lines:
66 m = re.match("\s+SONAME\s+([^\s]*)", l)
67 if m and not m.group(1) in sonames:
68 sonames.append(m.group(1))
69
70 bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
71 soname = None
72 if len(sonames) == 1:
73 soname = sonames[0]
74 elif len(sonames) > 1:
75 lead = d.getVar('LEAD_SONAME', True)
76 if lead:
77 r = re.compile(lead)
78 filtered = []
79 for s in sonames:
80 if r.match(s):
81 filtered.append(s)
82 if len(filtered) == 1:
83 soname = filtered[0]
84 elif len(filtered) > 1:
85 bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
86 else:
87 bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
88 else:
89 bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
90
91 if has_libs and not has_bins and soname:
92 soname_result = socrunch(soname)
93 if soname_result:
94 (pkgname, devname) = soname_result
95 for pkg in packages.split():
96 if (d.getVar('PKG_' + pkg) or d.getVar('DEBIAN_NOAUTONAME_' + pkg)):
97 continue
98 debian_pn = d.getVar('DEBIANNAME_' + pkg)
99 if debian_pn:
100 newpkg = debian_pn
101 elif pkg == orig_pkg:
102 newpkg = pkgname
103 else:
104 newpkg = pkg.replace(orig_pkg, devname, 1)
105                    mlpre = d.getVar('MLPREFIX', True)
106                    if mlpre:
107                        if not newpkg.startswith(mlpre):
108 newpkg = mlpre + newpkg
109 if newpkg != pkg:
110 d.setVar('PKG_' + pkg, newpkg)
111
112 # reversed sort is needed when some package is substring of another
113 # ie in ncurses we get without reverse sort:
114 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
115 # and later
116 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
117 # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
118 for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
119 auto_libname(packages, pkg)
120}
121
122EXPORT_FUNCTIONS package_name_hook
123
124DEBIAN_NAMES = "1"
125
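
socrunch() above derives Debian-style package names from a soname; a digit immediately before ".so." forces a "-" separator so the version suffix stays readable. Exercising it standalone:

    import re

    def socrunch(s):
        s = s.lower().replace('_', '-')
        m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
        if m is None:
            return None
        if m.group(2) in '0123456789':
            bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
        else:
            bin = m.group(1) + m.group(2) + m.group(3)
        dev = m.group(1) + m.group(2)
        return (bin, dev)

    print(socrunch("libfoo.so.1"))   # -> ('libfoo1', 'libfoo')
    print(socrunch("libtic5.so.5"))  # -> ('libtic5-5', 'libtic5')
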
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
new file mode 100644
index 0000000000..78f5e4a7ba
--- /dev/null
+++ b/meta/classes/deploy.bbclass
@@ -0,0 +1,10 @@
1DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
2SSTATETASKS += "do_deploy"
3do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
4do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
5
6python do_deploy_setscene () {
7 sstate_setscene(d)
8}
9addtask do_deploy_setscene
10do_deploy[dirs] = "${DEPLOYDIR} ${B}"
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
new file mode 100644
index 0000000000..92edb9ef25
--- /dev/null
+++ b/meta/classes/devshell.bbclass
@@ -0,0 +1,33 @@
1inherit terminal
2
3DEVSHELL = "${SHELL}"
4
5python do_devshell () {
6 if d.getVarFlag("do_devshell", "manualfakeroot"):
7 d.prependVar("DEVSHELL", "pseudo ")
8 fakeenv = d.getVar("FAKEROOTENV", True).split()
9 for f in fakeenv:
10        k = f.split("=", 1)
11 d.setVar(k[0], k[1])
12 d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
13 d.delVarFlag("do_devshell", "fakeroot")
14
15 oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d)
16}
17
18addtask devshell after do_patch
19
20do_devshell[dirs] = "${S}"
21do_devshell[nostamp] = "1"
22
23# devshell and fakeroot/pseudo need careful handling since only the final
24# command should run under fakeroot emulation; any X connection should
25# be done as the normal user. We therefore carefully construct the environment
26# manually
27python () {
28 if d.getVarFlag("do_devshell", "fakeroot"):
29 # We need to signal our code that we want fakeroot however we
30 # can't manipulate the environment and variables here yet (see YOCTO #4795)
31 d.setVarFlag("do_devshell", "manualfakeroot", "1")
32 d.delVarFlag("do_devshell", "fakeroot")
33}
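
FAKEROOTENV is a space-separated list of NAME=value pairs; splitting on the first "=" only keeps values that themselves contain "=" intact. A sketch with illustrative pseudo variables:

    fakeenv = "PSEUDO_PREFIX=/usr PSEUDO_OPTS= PSEUDO_DISABLED=0".split()
    env = dict(f.split("=", 1) for f in fakeenv)
    print(env["PSEUDO_PREFIX"])  # -> /usr
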
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
new file mode 100644
index 0000000000..61b11b7d53
--- /dev/null
+++ b/meta/classes/distro_features_check.bbclass
@@ -0,0 +1,28 @@
1# Allow checking of required and conflicting DISTRO_FEATURES
2#
3# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
4# in DISTRO_FEATURES.
5# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
6# DISTRO_FEATURES.
7#
8# Copyright 2013 (C) O.S. Systems Software LTDA.
9
10python () {
11 required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True)
12 if required_distro_features:
13 required_distro_features = required_distro_features.split()
14 distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
15        for f in required_distro_features:
16            if f in distro_features:
17                continue
18            else:
19                raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
20
21 conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True)
22 if conflict_distro_features:
23 conflict_distro_features = conflict_distro_features.split()
24 distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
25 for f in conflict_distro_features:
26 if f in distro_features:
27                raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
28}
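
The anonymous python above boils down to two list checks: every required feature must be present, and no conflicting feature may be. As a standalone sketch:

    def check_features(distro_features, required, conflicts):
        missing = [f for f in required if f not in distro_features]
        clashing = [f for f in conflicts if f in distro_features]
        return missing, clashing

    print(check_features(["x11", "alsa"], ["x11"], ["wayland"]))  # -> ([], [])
    print(check_features(["wayland"], ["x11"], ["wayland"]))      # -> (['x11'], ['wayland'])
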
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
new file mode 100644
index 0000000000..b47358b059
--- /dev/null
+++ b/meta/classes/distrodata.bbclass
@@ -0,0 +1,916 @@
1include conf/distro/include/package_regex.inc
2addhandler distro_eventhandler
3distro_eventhandler[eventmask] = "bb.event.BuildStarted"
4python distro_eventhandler() {
5 import oe.distro_check as dc
6 logfile = dc.create_log_file(e.data, "distrodata.csv")
7 lf = bb.utils.lockfile("%s.lock" % logfile)
8 f = open(logfile, "a")
9    f.write("Package,Description,Owner,License,VerMatch,Version,Upstream,Reason,Recipe Status,Distro 1,Distro 2,Distro 3\n")
10 f.close()
11 bb.utils.unlockfile(lf)
12
13 return
14}
15
16addtask distrodata_np
17do_distrodata_np[nostamp] = "1"
18python do_distrodata_np() {
19 localdata = bb.data.createCopy(d)
20 pn = d.getVar("PN", True)
21 bb.note("Package Name: %s" % pn)
22
23 import oe.distro_check as dist_check
24 tmpdir = d.getVar('TMPDIR', True)
25 distro_check_dir = os.path.join(tmpdir, "distro_check")
26 datetime = localdata.getVar('DATETIME', True)
27 dist_check.update_distro_data(distro_check_dir, datetime)
28
29 if pn.find("-native") != -1:
30 pnstripped = pn.split("-native")
31 bb.note("Native Split: %s" % pnstripped)
32 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
33 bb.data.update_data(localdata)
34
35 if pn.find("-cross") != -1:
36 pnstripped = pn.split("-cross")
37 bb.note("cross Split: %s" % pnstripped)
38 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
39 bb.data.update_data(localdata)
40
41 if pn.find("-crosssdk") != -1:
42 pnstripped = pn.split("-crosssdk")
43 bb.note("cross Split: %s" % pnstripped)
44 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
45 bb.data.update_data(localdata)
46
47 if pn.startswith("nativesdk-"):
48 pnstripped = pn.replace("nativesdk-", "")
49 bb.note("NativeSDK Split: %s" % pnstripped)
50 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
51 bb.data.update_data(localdata)
52
53
54 if pn.find("-initial") != -1:
55 pnstripped = pn.split("-initial")
56 bb.note("initial Split: %s" % pnstripped)
57 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
58 bb.data.update_data(localdata)
59
60    # generate package information from .bb file
61 pname = localdata.getVar('PN', True)
62 pcurver = localdata.getVar('PV', True)
63 pdesc = localdata.getVar('DESCRIPTION', True)
64 if pdesc is not None:
65 pdesc = pdesc.replace(',','')
66 pdesc = pdesc.replace('\n','')
67
68 pgrp = localdata.getVar('SECTION', True)
69 plicense = localdata.getVar('LICENSE', True).replace(',','_')
70
71 rstatus = localdata.getVar('RECIPE_COLOR', True)
72 if rstatus is not None:
73 rstatus = rstatus.replace(',','')
74
75 pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
76 if pcurver == pupver:
77 vermatch="1"
78 else:
79 vermatch="0"
80 noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
81 if noupdate_reason is None:
82 noupdate="0"
83 else:
84 noupdate="1"
85 noupdate_reason = noupdate_reason.replace(',','')
86
87 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
88 rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
89 result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
90
91 bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
92 (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
93 line = pn
94 for i in result:
95 line = line + "," + i
96 bb.note("%s\n" % line)
97}
98
99addtask distrodata
100do_distrodata[nostamp] = "1"
101python do_distrodata() {
102 logpath = d.getVar('LOG_DIR', True)
103 bb.utils.mkdirhier(logpath)
104 logfile = os.path.join(logpath, "distrodata.csv")
105
106 import oe.distro_check as dist_check
107 localdata = bb.data.createCopy(d)
108 tmpdir = d.getVar('TMPDIR', True)
109 distro_check_dir = os.path.join(tmpdir, "distro_check")
110 datetime = localdata.getVar('DATETIME', True)
111 dist_check.update_distro_data(distro_check_dir, datetime)
112
113 pn = d.getVar("PN", True)
114 bb.note("Package Name: %s" % pn)
115
116 if pn.find("-native") != -1:
117 pnstripped = pn.split("-native")
118 bb.note("Native Split: %s" % pnstripped)
119 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
120 bb.data.update_data(localdata)
121
122 if pn.startswith("nativesdk-"):
123 pnstripped = pn.replace("nativesdk-", "")
124 bb.note("NativeSDK Split: %s" % pnstripped)
125 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
126 bb.data.update_data(localdata)
127
128 if pn.find("-cross") != -1:
129 pnstripped = pn.split("-cross")
130 bb.note("cross Split: %s" % pnstripped)
131 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
132 bb.data.update_data(localdata)
133
134 if pn.find("-crosssdk") != -1:
135 pnstripped = pn.split("-crosssdk")
136 bb.note("cross Split: %s" % pnstripped)
137 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
138 bb.data.update_data(localdata)
139
140 if pn.find("-initial") != -1:
141 pnstripped = pn.split("-initial")
142 bb.note("initial Split: %s" % pnstripped)
143 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
144 bb.data.update_data(localdata)
145
146    # generate package information from .bb file
147 pname = localdata.getVar('PN', True)
148 pcurver = localdata.getVar('PV', True)
149 pdesc = localdata.getVar('DESCRIPTION', True)
150 if pdesc is not None:
151 pdesc = pdesc.replace(',','')
152 pdesc = pdesc.replace('\n','')
153
154 pgrp = localdata.getVar('SECTION', True)
155 plicense = localdata.getVar('LICENSE', True).replace(',','_')
156
157 rstatus = localdata.getVar('RECIPE_COLOR', True)
158 if rstatus is not None:
159 rstatus = rstatus.replace(',','')
160
161 pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
162 if pcurver == pupver:
163 vermatch="1"
164 else:
165 vermatch="0"
166
167 noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
168 if noupdate_reason is None:
169 noupdate="0"
170 else:
171 noupdate="1"
172 noupdate_reason = noupdate_reason.replace(',','')
173
174 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
175 rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
176 # do the comparison
177 result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
178
179 lf = bb.utils.lockfile("%s.lock" % logfile)
180 f = open(logfile, "a")
181 f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s" % \
182 (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
183 line = ""
184 for i in result:
185 line = line + "," + i
186 f.write(line + "\n")
187 f.close()
188 bb.utils.unlockfile(lf)
189}
190
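
The repeated stripping blocks above reduce recipe-name variants to their base name so the "pn-" override matches the base recipe. Condensed into one sketch:

    def base_override(pn):
        for suffix in ("-native", "-cross", "-crosssdk", "-initial"):
            if suffix in pn:
                return "pn-" + pn.split(suffix)[0]
        if pn.startswith("nativesdk-"):
            return "pn-" + pn.replace("nativesdk-", "")
        return "pn-" + pn

    print(base_override("zlib-native"))      # -> pn-zlib
    print(base_override("nativesdk-cmake"))  # -> pn-cmake
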
191addtask distrodataall after do_distrodata
192do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
193do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}"
194do_distrodataall[nostamp] = "1"
195do_distrodataall() {
196 :
197}
198
199addhandler checkpkg_eventhandler
200checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
201python checkpkg_eventhandler() {
202 def parse_csv_file(filename):
203 package_dict = {}
204 fd = open(filename, "r")
205 lines = fd.read().rsplit("\n")
206 fd.close()
207
208 first_line = ''
209 index = 0
210 for line in lines:
211 #Skip the first line
212 if index == 0:
213 first_line = line
214 index += 1
215 continue
216 elif line == '':
217 continue
218 index += 1
219 package_name = line.rsplit("\t")[0]
220 if '-native' in package_name or 'nativesdk-' in package_name:
221 original_name = package_name.rsplit('-native')[0]
222 if original_name == '':
223 original_name = package_name.rsplit('nativesdk-')[0]
224 if original_name in package_dict:
225 continue
226 else:
227 package_dict[package_name] = line
228 else:
229 new_name = package_name + "-native"
230 if not(new_name in package_dict):
231 new_name = 'nativesdk-' + package_name
232 if new_name in package_dict:
233 del package_dict[new_name]
234 package_dict[package_name] = line
235
236 fd = open(filename, "w")
237 fd.write("%s\n"%first_line)
238 for el in package_dict:
239 fd.write(package_dict[el] + "\n")
240 fd.close()
241
242 del package_dict
243
244 if bb.event.getName(e) == "BuildStarted":
245 import oe.distro_check as dc
246 logfile = dc.create_log_file(e.data, "checkpkg.csv")
247
248 lf = bb.utils.lockfile("%s.lock" % logfile)
249 f = open(logfile, "a")
250 f.write("Package\tVersion\tUpver\tLicense\tSection\tHome\tRelease\tDepends\tBugTracker\tPE\tDescription\tStatus\tTracking\tURI\tMAINTAINER\tNoUpReason\n")
251 f.close()
252 bb.utils.unlockfile(lf)
253 elif bb.event.getName(e) == "BuildCompleted":
254 import os
255 filename = "tmp/log/checkpkg.csv"
256 if os.path.isfile(filename):
257 lf = bb.utils.lockfile("%s.lock"%filename)
258 parse_csv_file(filename)
259 bb.utils.unlockfile(lf)
260 return
261}
262
263addtask checkpkg
264do_checkpkg[nostamp] = "1"
265python do_checkpkg() {
266 localdata = bb.data.createCopy(d)
267 import re
268 import tempfile
269 import subprocess
270
271 """
272 sanity check to ensure same name and type. Match as many patterns as possible
273 such as:
274 gnome-common-2.20.0.tar.gz (most common format)
275 gtk+-2.90.1.tar.gz
276 xf86-input-synaptics-12.6.9.tar.gz
277 dri2proto-2.3.tar.gz
278 blktool_4.orig.tar.gz
279 libid3tag-0.15.1b.tar.gz
280 unzip552.tar.gz
281 icu4c-3_6-src.tgz
282 genext2fs_1.3.orig.tar.gz
283 gst-fluendo-mp3
284 """
285 prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*\+?[\-_]" # match most patterns which uses "-" as separator to version digits
286 prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
287 prefix3 = "[0-9]+[\-]?[a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
288 prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
289 ver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)" # was: "((\d+[\.\-_[a-z]])+)"
290 # The src.rpm extension was added only for rpm packages. It can be removed if
291 # rpm packages will always be considered as requiring a manual upgrade.
292 suffix = "(tar\.gz|tgz|tar\.bz2|tar\.lz4|zip|xz|rpm|bz2|lz4|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
293
294 suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "tar.lz4", "bz2", "lz4", "orig.tar.gz", "src.tar.gz", "src.rpm", "src.tgz", "svnr\d+.tar.bz2", "stable.tar.gz", "src.rpm")
295 sinterstr = "(?P<name>%s?)v?(?P<ver>%s)(\-source)?" % (prefix, ver_regex)
296 sdirstr = "(?P<name>%s)\.?v?(?P<ver>%s)(\-source)?[\.\-](?P<type>%s$)" % (prefix, ver_regex, suffix)
297
298 def parse_inter(s):
299 m = re.search(sinterstr, s)
300 if not m:
301 return None
302 else:
303 return (m.group('name'), m.group('ver'), "")
304
305 def parse_dir(s):
306 m = re.search(sdirstr, s)
307 if not m:
308 return None
309 else:
310 return (m.group('name'), m.group('ver'), m.group('type'))
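# Illustrative examples, assuming the regexes above match as intended:
#   parse_dir("gnome-common-2.20.0.tar.gz") -> roughly ("gnome-common-", "2.20.0", "tar.gz")
#   parse_inter("5.7") -> roughly ("", "5.7", "")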
311
312 def modelate_version(version):
313 if version[0] in ['.', '-']:
314 if version[1].isdigit():
315 version = version[1] + version[0] + version[2:len(version)]
316 else:
317 version = version[1:len(version)]
318
319 version = re.sub('\-', '.', version)
320 version = re.sub('_', '.', version)
321 version = re.sub('(rc)+', '.-1.', version)
322 version = re.sub('(alpha)+', '.-3.', version)
323 version = re.sub('(beta)+', '.-2.', version)
324 if version[0] == 'v':
325 version = version[1:len(version)]
326 return version
327
328 """
329 Check whether 'new' is newer than 'old' version. We use existing vercmp() for the
330 purpose. PE is cleared in comparison as it's not for build, and PV is cleared too
331 for simplicity as it's somehow difficult to get from various upstream format
332 """
333 def __vercmp(old, new):
334 (on, ov, ot) = old
335 (en, ev, et) = new
336 if on != en or (et and et not in suffixtuple):
337 return False
338 ov = modelate_version(ov)
339 ev = modelate_version(ev)
340
341 result = bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
342 if result < 0:
343 return True
344 else:
345 return False
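# Illustrative example with hypothetical values:
#   __vercmp(("foo-", "1.0", ""), ("foo-", "1.1", "tar.gz")) -> True (1.1 is newer)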
346
347 """
348 wrapper for fetch upstream directory info
349 'url' - upstream link customized by regular expression
350 'd' - database
351 'tmpf' - tmpfile for fetcher output
352 We don't want to exit whole build due to one recipe error. So handle all exceptions
353 gracefully w/o leaking to outer.
354 """
355 def internal_fetch_wget(url, d, tmpf):
356 status = "ErrFetchUnknown"
357 """
358 Clear internal url cache as it's a temporary check. Not doing so will have
359 bitbake check url multiple times when looping through a single url
360 """
361 fn = d.getVar('FILE', True)
362 bb.fetch2.urldata_cache[fn] = {}
363
364 """
365 To avoid impacting bitbake build engine, this trick is required for reusing bitbake
366 interfaces. bb.fetch.go() is not appliable as it checks downloaded content in ${DL_DIR}
367 while we don't want to pollute that place. So bb.fetch2.checkstatus() is borrowed here
368 which is designed for check purpose but we override check command for our own purpose
369 """
370 ld = bb.data.createCopy(d)
371 ld.setVar('CHECKCOMMAND_wget', "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12\" '${URI}'" \
372 % tmpf.name)
373 bb.data.update_data(ld)
374
375 try:
376 fetcher = bb.fetch2.Fetch([url], ld)
377 fetcher.checkstatus()
378 status = "SUCC"
379 except bb.fetch2.BBFetchException, e:
380 status = "ErrFetch"
381
382 return status
383
384 """
385 Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
386 'url' - upstream link customized by regular expression
387 'd' - database
388 'curver' - current version
389 Return new version if success, or else error in "Errxxxx" style
390 """
391 def check_new_dir(url, curver, d):
392 pn = d.getVar('PN', True)
393 f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
394 status = internal_fetch_wget(url, d, f)
395 fhtml = f.read()
396 if status == "SUCC" and len(fhtml):
397 newver = parse_inter(curver)
398
399 """
400 match "*4.1/">*4.1/ where '*' matches chars
401 N.B. add package name, only match for digits
402 """
403 regex = d.getVar('REGEX', True)
404 if not regex:
405 regex = "^%s" % prefix
406 m = re.search("^%s" % regex, curver)
407 if m:
408 s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
409 else:
410 s = "(\d+[\.\-_])+\d+/?"
411
412 searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
413
414 reg = re.compile(searchstr)
415 valid = 0
416 for line in fhtml.split("\n"):
417 if line.find(curver) >= 0:
418 valid = 1
419 m = reg.search(line)
420 if m:
421 ver = m.group().split("\"")[1]
422 ver = ver.strip("/")
423 ver = parse_inter(ver)
424 if ver and __vercmp(newver, ver) == True:
425 newver = ver
426
427 """Expect a match for curver in directory list, or else it indicates unknown format"""
428 if not valid:
429 status = "ErrParseInterDir"
430 else:
431 """rejoin the path name"""
432 status = newver[0] + newver[1]
433 elif not len(fhtml):
434 status = "ErrHostNoDir"
435
436 f.close()
437 if status != "ErrHostNoDir" and re.match("Err", status):
438 logpath = d.getVar('LOG_DIR', True)
439 subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
440 os.unlink(f.name)
441 return status
442
443 """
444 Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
445 'url' - upstream link customized by regular expression
446 'd' - database
447 'curname' - current package name
448 Return new version if success, or else error in "Errxxxx" style
449 """
450 def check_new_version(url, curname, d):
451 """possible to have no version in pkg name, such as spectrum-fw"""
452 if not re.search("\d+", curname):
453 return pcurver
454 pn = d.getVar('PN', True)
455 newver_regex = d.getVar('REGEX', True)
456 f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
457 status = internal_fetch_wget(url, d, f)
458 fhtml = f.read()
459
460 if status == "SUCC" and len(fhtml):
461 newver = parse_dir(curname)
462
463 if not newver_regex:
464 """this is the default matching pattern, if recipe does not """
465 """provide a regex expression """
466 """match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
467 pn1 = re.search("^%s" % prefix, curname).group()
468 s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
469 searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
470 reg = searchstr
471 else:
472 reg = newver_regex
473 valid = 0
474 count = 0
475 for line in fhtml.split("\n"):
476 if pn == 'kconfig-frontends':
477 m = re.findall(reg, line)
478 if m:
479 valid = 1
480 for match in m:
481 (on, ov, oe) = newver
482 ver = (on, match[0], oe)
483 if ver and __vercmp(newver, ver) == True:
484 newver = ver
485 continue
486 count += 1
487 m = re.search(reg, line)
488 if m:
489 valid = 1
490 if not newver_regex:
491 ver = m.group().split("\"")[1].split("/")[-1]
492 if ver == "download":
493 ver = m.group().split("\"")[1].split("/")[-2]
494 ver = parse_dir(ver)
495 else:
496 """ we cheat a little here, but we assume that the
497 regular expression in the recipe will extract exactly
498 the version """
499 (on, ov, oe) = newver
500 ver = (on, m.group('pver'), oe)
501 if ver and __vercmp(newver, ver) == True:
502 newver = ver
503 """Expect a match for curver in directory list, or else it indicates unknown format"""
504 if not valid:
505 status = "ErrParseDir"
506 else:
507 """newver still contains a full package name string"""
508 status = re.sub('_', '.', newver[1])
509 elif not len(fhtml):
510 status = "ErrHostNoDir"
511
512 f.close()
513 """if host hasn't directory information, no need to save tmp file"""
514 if status != "ErrHostNoDir" and re.match("Err", status):
515 logpath = d.getVar('LOG_DIR', True)
516 subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
517 os.unlink(f.name)
518 return status
519
520 """first check whether a uri is provided"""
521 src_uri = d.getVar('SRC_URI', True)
522 if not src_uri:
523 return
524
525 """initialize log files."""
526 logpath = d.getVar('LOG_DIR', True)
527 bb.utils.mkdirhier(logpath)
528 logfile = os.path.join(logpath, "checkpkg.csv")
529
530 """generate package information from .bb file"""
531 pname = d.getVar('PN', True)
532
533 if pname.find("-native") != -1:
534 if d.getVar('BBCLASSEXTEND', True):
535 return
536 pnstripped = pname.split("-native")
537 bb.note("Native Split: %s" % pnstripped)
538 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
539 bb.data.update_data(localdata)
540
541 if pname.startswith("nativesdk-"):
542 if d.getVar('BBCLASSEXTEND', True):
543 return
544 pnstripped = pname.replace("nativesdk-", "")
545 bb.note("NativeSDK Split: %s" % pnstripped)
546 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
547 bb.data.update_data(localdata)
548
549 if pname.find("-cross") != -1:
550 pnstripped = pname.split("-cross")
551 bb.note("cross Split: %s" % pnstripped)
552 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
553 bb.data.update_data(localdata)
554
555 if pname.find("-initial") != -1:
556 pnstripped = pname.split("-initial")
557 bb.note("initial Split: %s" % pnstripped)
558 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
559 bb.data.update_data(localdata)
560
561 chk_uri = d.getVar('REGEX_URI', True)
562 if not chk_uri:
563 chk_uri = src_uri
564 pdesc = localdata.getVar('DESCRIPTION', True)
565 pgrp = localdata.getVar('SECTION', True)
566 if localdata.getVar('PRSPV', True):
567 pversion = localdata.getVar('PRSPV', True)
568 else:
569 pversion = localdata.getVar('PV', True)
570 plicense = localdata.getVar('LICENSE', True)
571 psection = localdata.getVar('SECTION', True)
572 phome = localdata.getVar('HOMEPAGE', True)
573 prelease = localdata.getVar('PR', True)
574 pdepends = localdata.getVar('DEPENDS', True)
575 pbugtracker = localdata.getVar('BUGTRACKER', True)
576 ppe = localdata.getVar('PE', True)
577 psrcuri = localdata.getVar('SRC_URI', True)
578 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
579
580 found = 0
581 for uri in src_uri.split():
582 m = re.compile('(?P<type>[^:]*)').match(uri)
583 if not m:
584 raise bb.fetch2.MalformedUrl(uri)
585 elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
586 found = 1
587 pproto = m.group('type')
588 break
589 if not found:
590 pproto = "file"
591 pupver = "N/A"
592 pstatus = "ErrUnknown"
593
594 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(uri)
595 if type in ['http', 'https', 'ftp']:
596 if d.getVar('PRSPV', True):
597 pcurver = d.getVar('PRSPV', True)
598 else:
599 pcurver = d.getVar('PV', True)
600 else:
601 if d.getVar('PRSPV', True):
602 pcurver = d.getVar('PRSPV', True)
603 else:
604 pcurver = d.getVar("SRCREV", True)
605
606
607 if type in ['http', 'https', 'ftp']:
608 newver = pcurver
609 altpath = path
610 dirver = "-"
611 curname = "-"
612
613 """
614 match version number amid the path, such as "5.7" in:
615 http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
616 N.B. how about sth. like "../5.7/5.8/..."? Not find such example so far :-P
617 """
618 m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
619 if m:
620 altpath = path.split(m.group())[0]
621 dirver = m.group().strip("/")
622
623 """use new path and remove param. for wget only param is md5sum"""
624 alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
625 my_uri = d.getVar('REGEX_URI', True)
626 if my_uri:
627 if d.getVar('PRSPV', True):
628 newver = d.getVar('PRSPV', True)
629 else:
630 newver = d.getVar('PV', True)
631 else:
632 newver = check_new_dir(alturi, dirver, d)
633 altpath = path
634 if not re.match("Err", newver) and dirver != newver:
635 altpath = altpath.replace(dirver, newver, 1)
636 # For folder in folder cases - try to enter the folder again and then try parsing
637 """Now try to acquire all remote files in current directory"""
638 if not re.match("Err", newver):
639 curname = altpath.split("/")[-1]
640
641 """get remote name by skipping pacakge name"""
642 m = re.search(r"/.*/", altpath)
643 if not m:
644 altpath = "/"
645 else:
646 altpath = m.group()
647
648 chk_uri = d.getVar('REGEX_URI', True)
649 if not chk_uri:
650 alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
651 else:
652 alturi = chk_uri
653 newver = check_new_version(alturi, curname, d)
654 while newver == "ErrHostNoDir":
655 if alturi == "/download":
656 break
657 else:
658 alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
659 newver = check_new_version(alturi, curname, d)
660 if not re.match("Err", newver):
661 pupver = newver
662 if pupver != pcurver:
663 pstatus = "UPDATE"
664 else:
665 pstatus = "MATCH"
666
667 if re.match("Err", newver):
668 pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
669 elif type == 'git':
670 if user:
671 gituser = user + '@'
672 else:
673 gituser = ""
674
675 if 'protocol' in parm:
676 gitproto = parm['protocol']
677 else:
678 gitproto = "git"
679
680 # Get all tags and HEAD
681 if d.getVar('GIT_REGEX', True):
682 gitcmd = "git ls-remote %s://%s%s%s %s 2>&1" % (gitproto, gituser, host, path, d.getVar('GIT_REGEX', True))
683 else:
684 gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
685 gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
686
687 tmp = os.popen(gitcmd).read()
688 if 'unable to connect' in tmp:
689 tmp = None
690 tmp2 = os.popen(gitcmd2).read()
691 if 'unable to connect' in tmp2:
692 tmp2 = None
693 #This is for repos that have tags like refs/tags/1.2.2
694 phash = pversion.rsplit("+")[-1]
695 if tmp:
696 tmpline = tmp.split("\n")
697 verflag = 0
698 pupver = pversion
699 for line in tmpline:
700 if len(line)==0:
701 break
702 puptag = line.split("/")[-1]
703 upstr_regex = d.getVar('REGEX', True)
704 if upstr_regex:
705 puptag = re.search(upstr_regex, puptag)
706 else:
707 puptag = re.search("(?P<pver>([0-9][\.|_]?)+)", puptag)
708 if puptag is None:
709 continue
710 puptag = puptag.group('pver')
711 puptag = re.sub("_",".",puptag)
712 plocaltag = pupver.split("+git")[0]
713 if "git" in plocaltag:
714 plocaltag = plocaltag.split("-")[0]
715 result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
716
717 if result > 0:
718 verflag = 1
719 pupver = puptag
720 elif verflag == 0:
721 pupver = plocaltag
722 #This is for repos without tags
723 elif tmp2:
724 pupver = pversion.rsplit("+")[0]
725 phash = pupver
726 else:
727 pstatus = "ErrGitAccess"
728 if tmp2 and 'ErrGitAccess' not in pstatus:
729
730 latest_head = tmp2.rsplit("\t")[0][:7]
731 tmp3 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pversion)
732 tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pupver)
733 if not tmp4:
734 tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)', pupver)
735
736 if tmp3:
737 # Get status of the package - MATCH/UPDATE (compare the latest upstream version with the current one)
738 result = bb.utils.vercmp(("0", tmp4.group('git_ver') if tmp4 else tmp3.group('git_ver'), ""), ("0", tmp3.group('git_ver'), ""))
739 # Get the latest tag
740 pstatus = 'MATCH'
741 if result < 0:
742 latest_pv = tmp3.group('git_ver')
743 else:
744 latest_pv = pupver
745 if tmp3.group('head_md5')[:7] != latest_head:
746 pstatus = 'UPDATE'
747
748 git_prefix = tmp3.group('git_prefix')
749 pupver = latest_pv + tmp3.group('git_prefix') + latest_head
750 else:
751 bb.plain("#DEBUG# Package %s: current version (%s) doesn't match the usual pattern" % (pname, pversion))
753 elif type == 'svn':
754 ud = bb.fetch2.FetchData(uri, d)
755
756 svnFetcher = bb.fetch2.svn.Svn(d)
757 svnFetcher.urldata_init(ud, d)
758 try:
759 pupver = svnFetcher.latest_revision(ud, d, ud.names[0])
760 except bb.fetch2.FetchError:
761 pupver = None
762 pstatus = "ErrSvnAccess"
762
763 if pupver:
764 if pupver in pversion:
765 pstatus = "MATCH"
766 else:
767 pstatus = "UPDATE"
768 else:
769 pstatus = "ErrSvnAccess"
770
771 if 'rev' in ud.parm:
772 pcurver = ud.parm['rev']
773
774 if pstatus != "ErrSvnAccess":
775 tag = pversion.rsplit("+svn")[0]
776 svn_prefix = re.search('(\+svn[r|\-]?)', pversion)
777 if tag and svn_prefix:
778 pupver = tag + svn_prefix.group() + pupver
779
780 elif type == 'cvs':
781 pupver = "HEAD"
782 pstatus = "UPDATE"
783 elif type == 'file':
784 """local file is always up-to-date"""
785 pupver = pcurver
786 pstatus = "MATCH"
787 else:
788 pstatus = "ErrUnsupportedProto"
789
790 if re.match("Err", pstatus):
791 pstatus += ":%s%s" % (host, path)
792
793 """Read from manual distro tracking fields as alternative"""
794 pmver = d.getVar("RECIPE_UPSTREAM_VERSION", True)
795 if not pmver:
796 pmver = "N/A"
797 pmstatus = "ErrNoRecipeData"
798 else:
799 if pmver == pcurver:
800 pmstatus = "MATCH"
801 else:
802 pmstatus = "UPDATE"
803
804 psrcuri = psrcuri.split()[0]
805 pdepends = "".join(pdepends.split("\t"))
806 pdesc = "".join(pdesc.split("\t"))
807 no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
808 lf = bb.utils.lockfile("%s.lock" % logfile)
809 f = open(logfile, "a")
810 f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \
811 (pname, pversion, pupver, plicense, psection, phome, prelease, pdepends, pbugtracker, ppe, pdesc, pstatus, pmver, psrcuri, maintainer, no_upgr_reason))
812 f.close()
813 bb.utils.unlockfile(lf)
814}
815
816addtask checkpkgall after do_checkpkg
817do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg"
818do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}"
819do_checkpkgall[nostamp] = "1"
820do_checkpkgall() {
821 :
822}
823
824addhandler distro_check_eventhandler
825distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
826python distro_check_eventhandler() {
827 """initialize log files."""
828 import oe.distro_check as dc
829 result_file = dc.create_log_file(e.data, "distrocheck.csv")
830 return
831}
832
833addtask distro_check
834do_distro_check[nostamp] = "1"
835python do_distro_check() {
836 """checks if the package is present in other public Linux distros"""
837 import oe.distro_check as dc
838 import shutil
839 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
840 return
841
842 localdata = bb.data.createCopy(d)
843 bb.data.update_data(localdata)
844 tmpdir = d.getVar('TMPDIR', True)
845 distro_check_dir = os.path.join(tmpdir, "distro_check")
846 logpath = d.getVar('LOG_DIR', True)
847 bb.utils.mkdirhier(logpath)
848 result_file = os.path.join(logpath, "distrocheck.csv")
849 datetime = localdata.getVar('DATETIME', True)
850 dc.update_distro_data(distro_check_dir, datetime)
851
852 # do the comparison
853 result = dc.compare_in_distro_packages_list(distro_check_dir, d)
854
855 # save the results
856 dc.save_distro_check_result(result, datetime, result_file, d)
857}
858
859addtask distro_checkall after do_distro_check
860do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check"
861do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}"
862do_distro_checkall[nostamp] = "1"
863do_distro_checkall() {
864 :
865}
866#
867# Check for missing license text.
868# Use this task to generate the missing-license-text data for the pkg-report system,
869# so that we can find recipes whose license text does not exist in the common-licenses directory.
870#
871addhandler checklicense_eventhandler
872checklicense_eventhandler[eventmask] = "bb.event.BuildStarted"
873python checklicense_eventhandler() {
874 """initialize log files."""
875 import oe.distro_check as dc
876 logfile = dc.create_log_file(e.data, "missinglicense.csv")
877 lf = bb.utils.lockfile("%s.lock" % logfile)
878 f = open(logfile, "a")
879 f.write("Package\tLicense\tMissingLicense\n")
880 f.close()
881 bb.utils.unlockfile(lf)
882 return
883}
884
885addtask checklicense
886do_checklicense[nostamp] = "1"
887python do_checklicense() {
888 import shutil
889 logpath = d.getVar('LOG_DIR', True)
890 bb.utils.mkdirhier(logpath)
891 pn = d.getVar('PN', True)
892 logfile = os.path.join(logpath, "missinglicense.csv")
893 generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
894 license_types = d.getVar('LICENSE', True)
895 for license_type in ((license_types.replace('+', '').replace('|', '&')
896 .replace('(', '').replace(')', '').replace(';', '')
897 .replace(',', '').replace(" ", "").split("&"))):
898 if not os.path.isfile(os.path.join(generic_directory, license_type)):
899 lf = bb.utils.lockfile("%s.lock" % logfile)
900 f = open(logfile, "a")
901 f.write("%s\t%s\t%s\n" % \
902 (pn,license_types,license_type))
903 f.close()
904 bb.utils.unlockfile(lf)
905 return
906}
907
908addtask checklicenseall after do_checklicense
909do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense"
910do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}"
911do_checklicenseall[nostamp] = "1"
912do_checklicenseall() {
913 :
914}
915
916
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
new file mode 100644
index 0000000000..aa18e8b292
--- /dev/null
+++ b/meta/classes/distutils-base.bbclass
@@ -0,0 +1,4 @@
1DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
2RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
3
4inherit distutils-common-base pythonnative
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
new file mode 100644
index 0000000000..9a608eb63e
--- /dev/null
+++ b/meta/classes/distutils-common-base.bbclass
@@ -0,0 +1,24 @@
1inherit python-dir
2
3EXTRA_OEMAKE = ""
4
5export STAGING_INCDIR
6export STAGING_LIBDIR
7
8PACKAGES = "${PN}-staticdev ${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
9
10FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
11
12FILES_${PN}-staticdev += "\
13 ${PYTHON_SITEPACKAGES_DIR}/*.a \
14"
15FILES_${PN}-dev += "\
16 ${datadir}/pkgconfig \
17 ${libdir}/pkgconfig \
18 ${PYTHON_SITEPACKAGES_DIR}/*.la \
19"
20FILES_${PN}-dbg += "\
21 ${PYTHON_SITEPACKAGES_DIR}/.debug \
22 ${PYTHON_SITEPACKAGES_DIR}/*/.debug \
23 ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
24"
diff --git a/meta/classes/distutils-native-base.bbclass b/meta/classes/distutils-native-base.bbclass
new file mode 100644
index 0000000000..509cb9551a
--- /dev/null
+++ b/meta/classes/distutils-native-base.bbclass
@@ -0,0 +1,3 @@
1inherit distutils-common-base
2
3DEPENDS += "${@["${PYTHON_PN}-native", ""][(d.getVar('PACKAGES', True) == '')]}"
diff --git a/meta/classes/distutils-tools.bbclass b/meta/classes/distutils-tools.bbclass
new file mode 100644
index 0000000000..f43450e56f
--- /dev/null
+++ b/meta/classes/distutils-tools.bbclass
@@ -0,0 +1,77 @@
1DISTUTILS_BUILD_ARGS ?= ""
2DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
3DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
4 --install-data=${STAGING_DATADIR}"
5DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
6 --install-data=${D}/${datadir}"
7
8distutils_do_compile() {
9 STAGING_INCDIR=${STAGING_INCDIR} \
10 STAGING_LIBDIR=${STAGING_LIBDIR} \
11 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
12 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
13 bbfatal "${PYTHON_PN} setup.py build_ext execution failed."
14}
15
16distutils_stage_headers() {
17 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
18 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
19 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
20 bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
21}
22
23distutils_stage_all() {
24 STAGING_INCDIR=${STAGING_INCDIR} \
25 STAGING_LIBDIR=${STAGING_LIBDIR} \
26 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
27 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
28 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
29 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
30 bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
31}
32
33distutils_do_install() {
34 echo "Beginning ${PN} Install ..."
35 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
36 echo "Step 2 of ${PN} Install ..."
37 STAGING_INCDIR=${STAGING_INCDIR} \
38 STAGING_LIBDIR=${STAGING_LIBDIR} \
39 PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
40 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
41 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
42 bbfatal "${PYTHON_PN} setup.py install execution failed."
43
44 echo "Step 3 of ${PN} Install ..."
45 # support filenames with *spaces*
46 find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
47 sed -i -e s:${D}::g "$i"
48 done
49
50 echo "Step 4 of ${PN} Install ..."
51 if test -e ${D}${bindir} ; then
52 for i in ${D}${bindir}/* ; do \
53 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
54 done
55 fi
56
57 echo "Step 4 of ${PN} Install ..."
58 if test -e ${D}${sbindir}; then
59 for i in ${D}${sbindir}/* ; do \
60 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
61 done
62 fi
63
64 echo "Step 5 of ${PN} Install ..."
65 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
66
67 #
68 # FIXME: Bandaid against wrong datadir computation
69 #
70 if test -e ${D}${datadir}/share; then
71 mv -f ${D}${datadir}/share/* ${D}${datadir}/
72 fi
73}
74
75#EXPORT_FUNCTIONS do_compile do_install
76
77export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
new file mode 100644
index 0000000000..f3da023942
--- /dev/null
+++ b/meta/classes/distutils.bbclass
@@ -0,0 +1,81 @@
1inherit distutils-base
2
3DISTUTILS_BUILD_ARGS ?= ""
4DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
5DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
6 --install-data=${STAGING_DATADIR}"
7DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
8 --install-data=${D}/${datadir}"
9
10distutils_do_compile() {
11 STAGING_INCDIR=${STAGING_INCDIR} \
12 STAGING_LIBDIR=${STAGING_LIBDIR} \
13 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
14 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
15 bbfatal "${PYTHON_PN} setup.py build_ext execution failed."
16}
17
18distutils_stage_headers() {
19 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
20 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
21 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
22 bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
23}
24
25distutils_stage_all() {
26 STAGING_INCDIR=${STAGING_INCDIR} \
27 STAGING_LIBDIR=${STAGING_LIBDIR} \
28 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
29 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
30 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
31 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
32 bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
33}
34
35distutils_do_install() {
36 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
37 STAGING_INCDIR=${STAGING_INCDIR} \
38 STAGING_LIBDIR=${STAGING_LIBDIR} \
39 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
40 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
41 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
42 bbfatal "${PYTHON_PN} setup.py install execution failed."
43
44 # support filenames with *spaces*
45 find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
46 sed -i -e s:${D}::g "$i"
47 done
48
49 if test -e ${D}${bindir} ; then
50 for i in ${D}${bindir}/* ; do \
51 if [ ${PN} != "${BPN}-native" ]; then
52 sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
53 fi
54 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
55 done
56 fi
57
58 if test -e ${D}${sbindir}; then
59 for i in ${D}${sbindir}/* ; do \
60 if [ ${PN} != "${BPN}-native" ]; then
61 sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
62 fi
63 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
64 done
65 fi
66
67 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
68 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py*
69
70 #
71 # FIXME: Bandaid against wrong datadir computation
72 #
73 if test -e ${D}${datadir}/share; then
74 mv -f ${D}${datadir}/share/* ${D}${datadir}/
75 rmdir ${D}${datadir}/share
76 fi
77}
78
79EXPORT_FUNCTIONS do_compile do_install
80
81export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
new file mode 100644
index 0000000000..d4d25dccb9
--- /dev/null
+++ b/meta/classes/distutils3-base.bbclass
@@ -0,0 +1,8 @@
1DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
2RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
3
4PYTHON_BASEVERSION = "3.3"
5PYTHON_ABI = "m"
6
7inherit distutils-common-base python3native
8
diff --git a/meta/classes/distutils3-native-base.bbclass b/meta/classes/distutils3-native-base.bbclass
new file mode 100644
index 0000000000..ed3fe54587
--- /dev/null
+++ b/meta/classes/distutils3-native-base.bbclass
@@ -0,0 +1,4 @@
1PYTHON_BASEVERSION = "3.3"
2PYTHON_ABI = "m"
3
4inherit distutils-native-base
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
new file mode 100644
index 0000000000..bbd645cc63
--- /dev/null
+++ b/meta/classes/distutils3.bbclass
@@ -0,0 +1,98 @@
1inherit distutils3-base
2
3DISTUTILS_BUILD_ARGS ?= ""
4DISTUTILS_BUILD_EXT_ARGS ?= ""
5DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
6DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
7 --install-data=${STAGING_DATADIR}"
8DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
9 --install-data=${D}/${datadir}"
10
11distutils3_do_compile() {
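	# Note: when cross-compiling (BUILD_SYS != HOST_SYS), MACHINE is passed as
	# HOST_SYS below - presumably so setup.py identifies the target machine
	# rather than the build host triplet.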
12 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
13 SYS=${MACHINE}
14 else
15 SYS=${HOST_SYS}
16 fi
17 STAGING_INCDIR=${STAGING_INCDIR} \
18 STAGING_LIBDIR=${STAGING_LIBDIR} \
19 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
20 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
21 build ${DISTUTILS_BUILD_ARGS} || \
22 bbfatal "${PYTHON_PN} setup.py build_ext execution failed."
23}
24
25distutils3_stage_headers() {
26 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
27 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
28 SYS=${MACHINE}
29 else
30 SYS=${HOST_SYS}
31 fi
32 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
33 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
34 bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
35}
36
37distutils3_stage_all() {
38 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
39 SYS=${MACHINE}
40 else
41 SYS=${HOST_SYS}
42 fi
43 STAGING_INCDIR=${STAGING_INCDIR} \
44 STAGING_LIBDIR=${STAGING_LIBDIR} \
45 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
46 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
47 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
48 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
49 bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
50}
51
52distutils3_do_install() {
53 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
54 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
55 SYS=${MACHINE}
56 else
57 SYS=${HOST_SYS}
58 fi
59 STAGING_INCDIR=${STAGING_INCDIR} \
60 STAGING_LIBDIR=${STAGING_LIBDIR} \
61 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
62 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
63 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
64 bbfatal "${PYTHON_PN} setup.py install execution failed."
65
66 # support filenames with *spaces*
67 find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
68 sed -i -e s:${D}::g "$i"
69 done
70
71 if test -e ${D}${bindir} ; then
72 for i in ${D}${bindir}/* ; do \
73 sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
74 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
75 done
76 fi
77
78 if test -e ${D}${sbindir}; then
79 for i in ${D}${sbindir}/* ; do \
80 sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
81 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
82 done
83 fi
84
85 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
86
87 #
88 # FIXME: Bandaid against wrong datadir computation
89 #
90 if test -e ${D}${datadir}/share; then
91 mv -f ${D}${datadir}/share/* ${D}${datadir}/
92 rmdir ${D}${datadir}/share
93 fi
94}
95
96EXPORT_FUNCTIONS do_compile do_install
97
98export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
new file mode 100644
index 0000000000..2ac62747a2
--- /dev/null
+++ b/meta/classes/externalsrc.bbclass
@@ -0,0 +1,53 @@
1# Copyright (C) 2012 Linux Foundation
2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5# Released under the MIT license (see COPYING.MIT for the terms)
6#
7# externalsrc.bbclass enables use of an existing source tree, usually external to
8# the build system, to build a piece of software rather than going through the
9# usual fetch/unpack/patch process.
10#
11# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at
12# the directory containing the sources you want to use, e.g. from local.conf for
13# a recipe called "myrecipe" you would do:
14#
15# INHERIT += "externalsrc"
16# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
17#
18# In order to make this class work for both target and native versions (or with
19# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
20# directory under the work directory (split source and build directories). This is
21# the default, but the build directory can be set to the source directory if
22# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
23#
24# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
25#
26
27SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
28
29python () {
30 externalsrc = d.getVar('EXTERNALSRC', True)
31 if externalsrc:
32 d.setVar('S', externalsrc)
33 externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
34 if externalsrcbuild:
35 d.setVar('B', externalsrcbuild)
36 else:
37 d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
38 d.setVar('SRC_URI', '')
39
40 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
41
42 for task in tasks:
43 if task.endswith("_setscene"):
44 # sstate is never going to work for external source trees, disable it
45 bb.build.deltask(task, d)
46 else:
47 # Since configure will likely touch ${S}, take a lock so only one task has access at a time
48 d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock")
49
50 for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
51 bb.build.deltask(task, d)
52}
53
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
new file mode 100644
index 0000000000..faf57b108e
--- /dev/null
+++ b/meta/classes/extrausers.bbclass
@@ -0,0 +1,65 @@
1# This bbclass is mainly used for image level user/group configuration.
2# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
3
4# Below is an example showing how to use this functionality.
5# INHERIT += "extrausers"
6# EXTRA_USERS_PARAMS = "\
7# useradd -p '' tester; \
8# groupadd developers; \
9# userdel nobody; \
10# groupdel -g video; \
11# groupmod -g 1020 developers; \
12# usermod -s /bin/sh tester; \
13# "
14
15
16inherit useradd_base
17
18IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}"
19
20# Image level user / group settings
21ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
22
23# Image level user / group settings
24set_user_group () {
25 user_group_settings="${EXTRA_USERS_PARAMS}"
26 export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo"
27 setting=`echo $user_group_settings | cut -d ';' -f1`
28 remaining=`echo $user_group_settings | cut -d ';' -f2-`
29 while test "x$setting" != "x"; do
30 cmd=`echo $setting | cut -d ' ' -f1`
31 opts=`echo $setting | cut -d ' ' -f2-`
32 # Unlike useradd.bbclass, there's no file locking issue here, as these
33 # settings are applied serially. So we only retry once.
34 case $cmd in
35 useradd)
36 perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
37 ;;
38 groupadd)
39 perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
40 ;;
41 userdel)
42 perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
43 ;;
44 groupdel)
45 perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
46 ;;
47 usermod)
48 perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
49 ;;
50 groupmod)
51 perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
52 ;;
53 *)
54 bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
55 ;;
56 esac
57 # Avoid infinite loop if the last parameter doesn't end with ';'
58 if [ "$setting" = "$remaining" ]; then
59 break
60 fi
61 # iterate to the next setting
62 setting=`echo $remaining | cut -d ';' -f1`
63 remaining=`echo $remaining | cut -d ';' -f2-`
64 done
65}
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
new file mode 100644
index 0000000000..afd3fd2252
--- /dev/null
+++ b/meta/classes/fontcache.bbclass
@@ -0,0 +1,40 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for font
3# packages.
4#
5
6DEPENDS += "qemu-native"
7inherit qemu
8
9FONT_PACKAGES ??= "${PN}"
10FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
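
# Illustrative example for a recipe that ships its fonts in a separate package
# (the package name below is hypothetical):
# FONT_PACKAGES = "${PN}-fonts"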
11
12fontcache_common() {
13if [ "x$D" != "x" ] ; then
14 $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \
15 libdir=${libdir} base_libdir=${base_libdir}
16else
17 fc-cache
18fi
19}
20
21python populate_packages_append() {
22 font_pkgs = d.getVar('FONT_PACKAGES', True).split()
23 deps = d.getVar("FONT_EXTRA_RDEPENDS", True)
24
25 for pkg in font_pkgs:
26 if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
27
28 bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
29 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
30 if not postinst:
31 postinst = '#!/bin/sh\n'
32 postinst += d.getVar('fontcache_common', True)
33 d.setVar('pkg_postinst_%s' % pkg, postinst)
34
35 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
36 if not postrm:
37 postrm = '#!/bin/sh\n'
38 postrm += d.getVar('fontcache_common', True)
39 d.setVar('pkg_postrm_%s' % pkg, postrm)
40}
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
new file mode 100644
index 0000000000..e9076b2779
--- /dev/null
+++ b/meta/classes/gconf.bbclass
@@ -0,0 +1,70 @@
1DEPENDS += "gconf gconf-native"
2
3# These are for when gconftool is used natively and the prefix isn't necessarily
4# the sysroot. TODO: replicate the postinst logic for -native packages going
5# into sysroot as they won't be running their own install-time schema
6# registration (disabled below) nor the postinst script (which doesn't run for them).
7export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
8export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
9
10# Disable install-time schema registration as we're a packaging system so this
11# happens in the postinst script, not at install time. Set both the configure
12# script option and the traditional environment variable just to make sure.
13EXTRA_OECONF += "--disable-schemas-install"
14export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
15
16gconf_postinst() {
17if [ "x$D" != "x" ]; then
18 export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
19else
20 export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
21fi
22
23SCHEMA_LOCATION=$D/etc/gconf/schemas
24for SCHEMA in ${SCHEMA_FILES}; do
25 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
26 HOME=$D/root gconftool-2 \
27 --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
28 fi
29done
30}
31
32gconf_prerm() {
33SCHEMA_LOCATION=/etc/gconf/schemas
34for SCHEMA in ${SCHEMA_FILES}; do
35 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
36 HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
37 gconftool-2 \
38 --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
39 fi
40done
41}
42
43python populate_packages_append () {
44 import re
45 packages = d.getVar('PACKAGES', True).split()
46 pkgdest = d.getVar('PKGDEST', True)
47
48 for pkg in packages:
49 schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
50 schemas = []
51 schema_re = re.compile(".*\.schemas$")
52 if os.path.exists(schema_dir):
53 for f in os.listdir(schema_dir):
54 if schema_re.match(f):
55 schemas.append(f)
56 if schemas != []:
57 bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
58 d.setVar('SCHEMA_FILES', " ".join(schemas))
59 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
60 if not postinst:
61 postinst = '#!/bin/sh\n'
62 postinst += d.getVar('gconf_postinst', True)
63 d.setVar('pkg_postinst_%s' % pkg, postinst)
64 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
65 if not prerm:
66 prerm = '#!/bin/sh\n'
67 prerm += d.getVar('gconf_prerm', True)
68 d.setVar('pkg_prerm_%s' % pkg, prerm)
69 d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX') + 'gconf')
70}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
new file mode 100644
index 0000000000..03b89b2455
--- /dev/null
+++ b/meta/classes/gettext.bbclass
@@ -0,0 +1,19 @@
1def gettext_dependencies(d):
2 if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
3 return ""
4 if d.getVar('USE_NLS', True) == 'no':
5 return "gettext-minimal-native"
6 return d.getVar('DEPENDS_GETTEXT', False)
7
8def gettext_oeconf(d):
9 if d.getVar('USE_NLS', True) == 'no':
10 return '--disable-nls'
11 # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
12 if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
13 return '--disable-nls'
14 return "--enable-nls"
15
16DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
17
18BASEDEPENDS =+ "${@gettext_dependencies(d)}"
19EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
new file mode 100644
index 0000000000..0de22dd6d2
--- /dev/null
+++ b/meta/classes/gnome.bbclass
@@ -0,0 +1,5 @@
1inherit gnomebase gtk-icon-cache gconf mime
2
3EXTRA_OECONF += "--disable-introspection"
4
5UNKNOWN_CONFIGURE_WHITELIST += "--disable-introspection"
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
new file mode 100644
index 0000000000..739bf60649
--- /dev/null
+++ b/meta/classes/gnomebase.bbclass
@@ -0,0 +1,30 @@
1def gnome_verdir(v):
2 return oe.utils.trim_version(v, 2)
3
4GNOME_COMPRESS_TYPE ?= "bz2"
5SECTION ?= "x11/gnome"
6GNOMEBN ?= "${BPN}"
7SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
8
9DEPENDS += "gnome-common-native"
10
11FILES_${PN} += "${datadir}/application-registry \
12 ${datadir}/mime-info \
13 ${datadir}/mime/packages \
14 ${datadir}/mime/application \
15 ${datadir}/gnome-2.0 \
16 ${datadir}/polkit* \
17 ${datadir}/GConf \
18 ${datadir}/glib-2.0/schemas \
19"
20
21FILES_${PN}-doc += "${datadir}/devhelp"
22
23inherit autotools pkgconfig
24
25do_install_append() {
26 rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
27 rm -rf ${D}${localstatedir}/scrollkeeper/*
28 rm -f ${D}${datadir}/applications/*.cache
29}
30
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
new file mode 100644
index 0000000000..5c80c177de
--- /dev/null
+++ b/meta/classes/grub-efi.bbclass
@@ -0,0 +1,141 @@
1# grub-efi.bbclass
2# Copyright (c) 2011, Intel Corporation.
3# All rights reserved.
4#
5# Released under the MIT license (see packages/COPYING)
6
7# Provide grub-efi specific functions for building bootable images.
8
9# External variables
10# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
11# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
12# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
13# ${LABELS} - a list of targets for the automatic config
14# ${APPEND} - an override list of append strings for each label
15# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
16# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
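#
# Illustrative sketch of the grub.cfg that build_efi_cfg below generates
# (the label and append values are hypothetical):
#   default=boot
#   timeout=10
#   menuentry 'boot'{
#   linux /vmlinuz LABEL=boot root=/dev/ram0 console=ttyS0,115200
#   }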
17
18do_bootimg[depends] += "grub-efi:do_deploy"
19do_bootdirectdisk[depends] += "grub-efi:do_deploy"
20
21GRUB_SERIAL ?= "console=ttyS0,115200"
22GRUBCFG = "${S}/grub.cfg"
23GRUB_TIMEOUT ?= "10"
24#FIXME: build this from the machine config
25GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
26
27EFIDIR = "/EFI/BOOT"
28
29efi_populate() {
30 # DEST must be the root of the image so that EFIDIR is not
31 # nested under a top level directory.
32 DEST=$1
33
34 install -d ${DEST}${EFIDIR}
35
36 GRUB_IMAGE="bootia32.efi"
37 if [ "${TARGET_ARCH}" = "x86_64" ]; then
38 GRUB_IMAGE="bootx64.efi"
39 fi
40 install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
41
42 install -m 0644 ${GRUBCFG} ${DEST}${EFIDIR}
43}
44
45efi_iso_populate() {
46 iso_dir=$1
47 efi_populate $iso_dir
48 # Build an EFI directory to create efi.img
49 mkdir -p ${EFIIMGDIR}/${EFIDIR}
50 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
51 cp $iso_dir/vmlinuz ${EFIIMGDIR}
52 echo "${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
53 if [ -f "$iso_dir/initrd" ] ; then
54 cp $iso_dir/initrd ${EFIIMGDIR}
55 fi
56}
57
58efi_hddimg_populate() {
59 efi_populate $1
60}
61
62python build_efi_cfg() {
63 import sys
64
65 workdir = d.getVar('WORKDIR', True)
66 if not workdir:
67 bb.error("WORKDIR not defined, unable to package")
68 return
69
70 gfxserial = d.getVar('GRUB_GFXSERIAL', True) or ""
71
72 labels = d.getVar('LABELS', True)
73 if not labels:
74 bb.debug(1, "LABELS not defined, nothing to do")
75 return
76
77 if labels == []:
78 bb.debug(1, "No labels, nothing to do")
79 return
80
81 cfile = d.getVar('GRUBCFG', True)
82 if not cfile:
83 raise bb.build.FuncFailed('Unable to read GRUBCFG')
84
85 try:
86 cfgfile = open(cfile, 'w')
87 except (IOError, OSError):
88 raise bb.build.FuncFailed('Unable to open %s' % (cfile))
89
90 cfgfile.write('# Automatically created by OE\n')
91
92 opts = d.getVar('GRUB_OPTS', True)
93 if opts:
94 for opt in opts.split(';'):
95 cfgfile.write('%s\n' % opt)
96
97 cfgfile.write('default=%s\n' % (labels.split()[0]))
98
99 timeout = d.getVar('GRUB_TIMEOUT', True)
100 if timeout:
101 cfgfile.write('timeout=%s\n' % timeout)
102 else:
103 cfgfile.write('timeout=50\n')
104
105 if gfxserial == "1":
106 btypes = [ [ " graphics console", "" ],
107 [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
108 else:
109 btypes = [ [ "", "" ] ]
110
111 for label in labels.split():
112 localdata = d.createCopy()
113
114 overrides = localdata.getVar('OVERRIDES', True)
115 if not overrides:
116 raise bb.build.FuncFailed('OVERRIDES not defined')
117
118 for btype in btypes:
119 localdata.setVar('OVERRIDES', label + ':' + overrides)
120 bb.data.update_data(localdata)
121
122 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
123 lb = label
124 if label == "install":
125 lb = "install-efi"
126 cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
127
128 append = localdata.getVar('APPEND', True)
129 initrd = localdata.getVar('INITRD', True)
130
131 if append:
132 cfgfile.write('%s' % (append))
133 cfgfile.write(' %s' % btype[1])
134 cfgfile.write('\n')
135
136 if initrd:
137 cfgfile.write('initrd /initrd')
138 cfgfile.write('\n}\n')
139
140 cfgfile.close()
141}
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
new file mode 100644
index 0000000000..dec5abc026
--- /dev/null
+++ b/meta/classes/gsettings.bbclass
@@ -0,0 +1,37 @@
1# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
2# form on package install and removal.
3#
4# The compiled schemas are platform-agnostic, so we can depend on
5# glib-2.0-native for the native tool and run the postinst script when the
6# rootfs builds to save a little time on first boot.
7
8# TODO use a trigger so that this runs once per package operation run
9
10DEPENDS += "glib-2.0-native"
11
12RDEPENDS_${PN} += "glib-2.0-utils"
13
14FILES_${PN} += "${datadir}/glib-2.0/schemas"
15
16gsettings_postinstrm () {
17 glib-compile-schemas $D${datadir}/glib-2.0/schemas
18}
19
20python populate_packages_append () {
21 pkg = d.getVar('PN', True)
22 bb.note("adding gsettings postinst scripts to %s" % pkg)
23
24 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
25 if not postinst:
26 postinst = '#!/bin/sh\n'
27 postinst += d.getVar('gsettings_postinstrm', True)
28 d.setVar('pkg_postinst_%s' % pkg, postinst)
29
30 bb.note("adding gsettings postrm scripts to %s" % pkg)
31
32 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
33 if not postrm:
34 postrm = '#!/bin/sh\n'
35 postrm += d.getVar('gsettings_postinstrm', True)
36 d.setVar('pkg_postrm_%s' % pkg, postrm)
37}
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
new file mode 100644
index 0000000000..fb7863e99b
--- /dev/null
+++ b/meta/classes/gtk-doc.bbclass
@@ -0,0 +1,23 @@
1# Helper class to pull in the right gtk-doc dependencies and disable
2# gtk-doc.
3#
4# Long-term it would be great if this class could be toggled between
5# gtk-doc-stub-native and the real gtk-doc-native, which would enable
6# re-generation of documentation. For now, we'll make do with this which
7# packages up any existing documentation (i.e. from tarball builds).
8
9# The documentation directory, where the infrastructure will be copied.
10# gtkdocize has a default of "." so to handle out-of-tree builds set this to ${S}.
11GTKDOC_DOCDIR ?= "${S}"
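# A recipe can point it elsewhere for out-of-tree builds, e.g. (hypothetical
# docs subdirectory):
# GTKDOC_DOCDIR = "${S}/docs"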
12
13DEPENDS_append = " gtk-doc-stub-native"
14
15EXTRA_OECONF_append = "\
16 --disable-gtk-doc \
17 --disable-gtk-doc-html \
18 --disable-gtk-doc-pdf \
19"
20
21do_configure_prepend () {
22 ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} )
23}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
new file mode 100644
index 0000000000..789fa38a16
--- /dev/null
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -0,0 +1,62 @@
1FILES_${PN} += "${datadir}/icons/hicolor"
2
3DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-update-icon-cache-native"
4
5gtk_icon_cache_postinst() {
6if [ "x$D" != "x" ]; then
7 $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
8 base_libdir=${base_libdir}
9else
10
11 # Update the pixbuf loaders in case they haven't been registered yet
12 GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
13
14 for icondir in /usr/share/icons/* ; do
15 if [ -d $icondir ] ; then
16 gtk-update-icon-cache -fqt $icondir
17 fi
18 done
19fi
20}
21
22gtk_icon_cache_postrm() {
23if [ "x$D" != "x" ]; then
24 $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
25 base_libdir=${base_libdir}
26else
27 for icondir in /usr/share/icons/* ; do
28 if [ -d $icondir ] ; then
29 gtk-update-icon-cache -qt $icondir
30 fi
31 done
32fi
33}
34
35python populate_packages_append () {
36 packages = d.getVar('PACKAGES', True).split()
37 pkgdest = d.getVar('PKGDEST', True)
38
39 for pkg in packages:
40 icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
41 if not os.path.exists(icon_dir):
42 continue
43
44 bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
45 rdepends = ' ' + d.getVar('MLPREFIX') + "hicolor-icon-theme"
46 d.appendVar('RDEPENDS_%s' % pkg, rdepends)
47
48 bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
49
50 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
51 if not postinst:
52 postinst = '#!/bin/sh\n'
53 postinst += d.getVar('gtk_icon_cache_postinst', True)
54 d.setVar('pkg_postinst_%s' % pkg, postinst)
55
56 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
57 if not postrm:
58 postrm = '#!/bin/sh\n'
59 postrm += d.getVar('gtk_icon_cache_postrm', True)
60 d.setVar('pkg_postrm_%s' % pkg, postrm)
61}
62
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
new file mode 100644
index 0000000000..5b45149080
--- /dev/null
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -0,0 +1,83 @@
1# This class will update the inputmethod module cache for virtual keyboards
2#
3# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules
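#
# Illustrative example (hypothetical package split):
# GTKIMMODULES_PACKAGES = "${PN}-immodules"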
4
5DEPENDS =+ "qemu-native"
6
7inherit qemu
8
9GTKIMMODULES_PACKAGES ?= "${PN}"
10
11gtk_immodule_cache_postinst() {
12if [ "x$D" != "x" ]; then
13 for maj_ver in 2 3; do
14 if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
15 IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
16 ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
17 $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
18 sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
19
20 [ $? -ne 0 ] && exit 1
21 fi
22 done
23
24 exit 0
25fi
26if [ ! -z "`which gtk-query-immodules-2.0`" ]; then
27 gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
28fi
29if [ ! -z "`which gtk-query-immodules-3.0`" ]; then
30 gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
31fi
32}
33
34gtk_immodule_cache_postrm() {
35if [ "x$D" != "x" ]; then
36 for maj_ver in 2 3; do
37 if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
38 IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
39 ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
40 $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
41 sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
42
43 [ $? -ne 0 ] && exit 1
44 fi
45 done
46
47 exit 0
48fi
49if [ ! -z "`which gtk-query-immodules-2.0`" ]; then
50 gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
51fi
52if [ ! -z "`which gtk-query-immodules-3.0`" ]; then
53 gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
54fi
55}
56
57python populate_packages_append () {
58 gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split()
59
60 for pkg in gtkimmodules_pkgs:
61 bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
62
63 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
64 if not postinst:
65 postinst = '#!/bin/sh\n'
66 postinst += d.getVar('gtk_immodule_cache_postinst', True)
67 d.setVar('pkg_postinst_%s' % pkg, postinst)
68
69 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
70 if not postrm:
71 postrm = '#!/bin/sh\n'
72 postrm += d.getVar('gtk_immodule_cache_postrm', True)
73 d.setVar('pkg_postrm_%s' % pkg, postrm)
74}
75
76python __anonymous() {
77 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
78 gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES')
79 if not gtkimmodules_check:
80 bb_filename = d.getVar('FILE')
81 raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
82}
83
diff --git a/meta/classes/gummiboot.bbclass b/meta/classes/gummiboot.bbclass
new file mode 100644
index 0000000000..021465201f
--- /dev/null
+++ b/meta/classes/gummiboot.bbclass
@@ -0,0 +1,114 @@
1# Copyright (C) 2014 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# gummiboot.bbclass - equivalent of grub-efi.bbclass
6# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
7# (images built by bootimg.bbclass or boot-directdisk.bbclass)
8
9do_bootimg[depends] += "gummiboot:do_deploy"
10do_bootdirectdisk[depends] += "gummiboot:do_deploy"
11
12EFIDIR = "/EFI/BOOT"
13
14GUMMIBOOT_CFG ?= "${S}/loader.conf"
15GUMMIBOOT_ENTRIES ?= ""
16GUMMIBOOT_TIMEOUT ?= "10"
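
# Illustrative sketch of the files that build_efi_cfg below generates
# (the label and option values are hypothetical):
#   loader/loader.conf:       default boot
#                             timeout 10
#   loader/entries/boot.conf: title boot
#                             linux /vmlinuz
#                             options LABEL=boot root=/dev/ram0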
17
18efi_populate() {
19 DEST=$1
20
21 EFI_IMAGE="gummibootia32.efi"
22 DEST_EFI_IMAGE="bootia32.efi"
23 if [ "${TARGET_ARCH}" = "x86_64" ]; then
24 EFI_IMAGE="gummibootx64.efi"
25 DEST_EFI_IMAGE="bootx64.efi"
26 fi
27
28 install -d ${DEST}${EFIDIR}
29 # gummiboot requires these paths for configuration files
30 # they are not customizable so no point in new vars
31 install -d ${DEST}/loader
32 install -d ${DEST}/loader/entries
33 install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
34 install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf
35 for i in ${GUMMIBOOT_ENTRIES}; do
36 install -m 0644 ${i} ${DEST}/loader/entries
37 done
38}
39
40efi_iso_populate() {
41 iso_dir=$1
42 efi_populate $iso_dir
43 mkdir -p ${EFIIMGDIR}/${EFIDIR}
44 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
45 cp $iso_dir/vmlinuz ${EFIIMGDIR}
46 echo "${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
47 if [ -f "$iso_dir/initrd" ] ; then
48 cp $iso_dir/initrd ${EFIIMGDIR}
49 fi
50}
51
52efi_hddimg_populate() {
53 efi_populate $1
54}
55
56python build_efi_cfg() {
57 s = d.getVar("S", True)
58 labels = d.getVar('LABELS', True)
59 if not labels:
60 bb.debug(1, "LABELS not defined, nothing to do")
61 return
62
63 if labels == []:
64 bb.debug(1, "No labels, nothing to do")
65 return
66
67 cfile = d.getVar('GUMMIBOOT_CFG', True)
68 try:
69 cfgfile = open(cfile, 'w')
70    except (IOError, OSError):
71        raise bb.build.FuncFailed('Unable to open %s' % cfile)
72
73 cfgfile.write('# Automatically created by OE\n')
74 cfgfile.write('default %s\n' % (labels.split()[0]))
75 timeout = d.getVar('GUMMIBOOT_TIMEOUT', True)
76 if timeout:
77 cfgfile.write('timeout %s\n' % timeout)
78 else:
79 cfgfile.write('timeout 10\n')
80 cfgfile.close()
81
82 for label in labels.split():
83 localdata = d.createCopy()
84
85 overrides = localdata.getVar('OVERRIDES', True)
86 if not overrides:
87 raise bb.build.FuncFailed('OVERRIDES not defined')
88
89 entryfile = "%s/%s.conf" % (s, label)
90 d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile)
91 try:
92 entrycfg = open(entryfile, "w")
93        except (IOError, OSError):
94            raise bb.build.FuncFailed('Unable to open %s' % entryfile)
95 localdata.setVar('OVERRIDES', label + ':' + overrides)
96 bb.data.update_data(localdata)
97
98 entrycfg.write('title %s\n' % label)
99 entrycfg.write('linux /vmlinuz\n')
100
101 append = localdata.getVar('APPEND', True)
102 initrd = localdata.getVar('INITRD', True)
103
104 if initrd:
105 entrycfg.write('initrd /initrd\n')
106 lb = label
107 if label == "install":
108 lb = "install-efi"
109 entrycfg.write('options LABEL=%s ' % lb)
110 if append:
111 entrycfg.write('%s' % append)
112 entrycfg.write('\n')
113 entrycfg.close()
114}
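
As a usage sketch, gummiboot is selected from the distro or local configuration, and for every label in LABELS the build_efi_cfg() function above emits a loader entry roughly like the second block (the exact options line depends on APPEND and INITRD):

    EFI_PROVIDER = "gummiboot"

    # generated as ${S}/boot.conf for a label named "boot"
    title boot
    linux /vmlinuz
    options LABEL=boot
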
diff --git a/meta/classes/gzipnative.bbclass b/meta/classes/gzipnative.bbclass
new file mode 100644
index 0000000000..326cbbb6f6
--- /dev/null
+++ b/meta/classes/gzipnative.bbclass
@@ -0,0 +1,5 @@
1EXTRANATIVEPATH += "pigz-native gzip-native"
2DEPENDS += "gzip-native"
3
4# tar may get run by do_unpack or do_populate_lic which could call gzip
5do_unpack[depends] += "gzip-native:do_populate_sysroot"
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
new file mode 100644
index 0000000000..5c9e66c95e
--- /dev/null
+++ b/meta/classes/icecc.bbclass
@@ -0,0 +1,325 @@
1# IceCream distributed compiling support
2#
3# Stages directories with symlinks from gcc/g++ to icecc, for both
4# native and cross compilers. For each configure or compile task, the
5# directories are added at the head of the PATH list and ICECC_CXX
6# and ICECC_CC are set.
7#
8# For the cross compiler, creates a tar.gz of our toolchain and sets
9# ICECC_VERSION accordingly.
10#
11# The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
12# necessary environment tar.gz file to be used by the remote machines.
13# It also supports meta-toolchain generation.
14#
15# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which',
16# though this is not guaranteed to succeed.
17#
18# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user,
19# otherwise the default one provided by icecc-create-env.bb will be used.
20# (NOTE that this is a modified version of the script and *not* the one that comes with icecc.)
21#
22# The user can specify that individual packages, or packages belonging to a given class, should not use icecc
23# to distribute compile jobs to remote machines but should be handled locally, by defining ICECC_USER_CLASS_BL
24# and ICECC_USER_PACKAGE_BL with the appropriate values in local.conf. In addition, the user can force icecc
25# to be enabled for packages which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
26#
27#########################################################################################
28# Error checking is kept to a minimum, so double-check any parameters you pass to the class
29#########################################################################################
30
31BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL"
32
33ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
34
35def icecc_dep_prepend(d):
36 # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
37 # we need that built is the responsibility of the patch function / class, not
38 # the application.
39 if not d.getVar('INHIBIT_DEFAULT_DEPS'):
40 return "icecc-create-env-native"
41 return ""
42
43DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
44
45def get_cross_kernel_cc(bb,d):
46 kernel_cc = d.getVar('KERNEL_CC')
47
48 # evaluate the expression by the shell if necessary
49 if '`' in kernel_cc or '$(' in kernel_cc:
50 kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1]
51
52 kernel_cc = d.expand(kernel_cc)
53 kernel_cc = kernel_cc.replace('ccache', '').strip()
54 kernel_cc = kernel_cc.split(' ')[0]
55 kernel_cc = kernel_cc.strip()
56 return kernel_cc
57
58def get_icecc(d):
59 return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc")
60
61def create_path(compilers, bb, d):
62 """
63 Create Symlinks for the icecc in the staging directory
64 """
65 staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
66 if icc_is_kernel(bb, d):
67 staging += "-kernel"
68
69 #check if the icecc path is set by the user
70 icecc = get_icecc(d)
71
72 # Create the dir if necessary
73    try:
74        os.stat(staging)
75    except OSError:
76        try:
77            os.makedirs(staging)
78        except OSError:
79            pass
80
81 for compiler in compilers:
82 gcc_path = os.path.join(staging, compiler)
83        try:
84            os.stat(gcc_path)
85        except OSError:
86            try:
87                os.symlink(icecc, gcc_path)
88            except OSError:
89                pass
90
91 return staging
92
93def use_icc(bb,d):
94 # allarch recipes don't use compiler
95 if icc_is_allarch(bb, d):
96 return "no"
97
98 pn = d.getVar('PN', True)
99
100 system_class_blacklist = []
101 user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
102 package_class_blacklist = system_class_blacklist + user_class_blacklist
103
104 for black in package_class_blacklist:
105 if bb.data.inherits_class(black, d):
106 bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
107 return "no"
108
109    # "system" recipe blacklist contains a list of packages that can not distribute compile tasks
110    # for one reason or another
111    # this is the old list (which doesn't seem to be valid anymore, because all of these
112    # could be built with icecc enabled)
113    # system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
114    # when adding a new entry, please document why (how it failed) so that we can re-evaluate it later,
115    # e.g. when there is a new version
116 system_package_blacklist = []
117 user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
118 user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
119 package_blacklist = system_package_blacklist + user_package_blacklist
120
121 if pn in package_blacklist:
122 bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
123 return "no"
124
125 if pn in user_package_whitelist:
126 bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
127 return "yes"
128
129 if d.getVar('PARALLEL_MAKE') == "":
130 bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn)
131 return "no"
132
133 return "yes"
134
135def icc_is_allarch(bb, d):
136 return \
137        bb.data.inherits_class("allarch", d)
138
139def icc_is_kernel(bb, d):
140 return \
141        bb.data.inherits_class("kernel", d)
142
143def icc_is_native(bb, d):
144 return \
145 bb.data.inherits_class("cross", d) or \
146        bb.data.inherits_class("native", d)
147
148# Don't pollute allarch signatures with TARGET_FPU
149icc_version[vardepsexclude] += "TARGET_FPU"
150def icc_version(bb, d):
151 if use_icc(bb, d) == "no":
152 return ""
153
154 parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
155    if d.getVar('PARALLEL_MAKE') != "" and parallel:
156 d.setVar("PARALLEL_MAKE", parallel)
157
158 if icc_is_native(bb, d):
159 archive_name = "local-host-env"
160 elif d.expand('${HOST_PREFIX}') == "":
161 bb.fatal(d.expand("${PN}"), " NULL prefix")
162 else:
163 prefix = d.expand('${HOST_PREFIX}' )
164 distro = d.expand('${DISTRO}')
165 target_sys = d.expand('${TARGET_SYS}')
166 float = d.getVar('TARGET_FPU') or "hard"
167 archive_name = prefix + distro + "-" + target_sys + "-" + float
168 if icc_is_kernel(bb, d):
169 archive_name += "-kernel"
170
171 import socket
172 ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
173 tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
174
175 return tar_file
176
177def icc_path(bb,d):
178 if icc_is_kernel(bb, d):
179 return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
180
181 else:
182 prefix = d.expand('${HOST_PREFIX}')
183 return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
184
185def icc_get_external_tool(bb, d, tool):
186 external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
187 target_prefix = d.expand('${TARGET_PREFIX}')
188 return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
189
190# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
191icc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
192def icc_get_tool(bb, d, tool):
193 if icc_is_native(bb, d):
194 return bb.utils.which(os.getenv("PATH"), tool)
195 elif icc_is_kernel(bb, d):
196 return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
197 else:
198 ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
199 target_sys = d.expand('${TARGET_SYS}')
200 tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
201 if os.path.isfile(tool_bin):
202 return tool_bin
203 else:
204 external_tool_bin = icc_get_external_tool(bb, d, tool)
205 if os.path.isfile(external_tool_bin):
206 return external_tool_bin
207 else:
208 return ""
209
210def icc_get_and_check_tool(bb, d, tool):
211 # Check that g++ or gcc is not a symbolic link to icecc binary in
212 # PATH or icecc-create-env script will silently create an invalid
213 # compiler environment package.
214 t = icc_get_tool(bb, d, tool)
215 if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
216 bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
217 return ""
218 else:
219 return t
220
221wait_for_file() {
222 local TIME_ELAPSED=0
223 local FILE_TO_TEST=$1
224 local TIMEOUT=$2
225 until [ -f "$FILE_TO_TEST" ]
226 do
227 TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
228 if [ $TIME_ELAPSED -gt $TIMEOUT ]
229 then
230 return 1
231 fi
232 sleep 1
233 done
234}
235
236def set_icecc_env():
237 # dummy python version of set_icecc_env
238 return
239
240set_icecc_env() {
241 if [ "x${ICECC_DISABLED}" != "x" ]
242 then
243 return
244 fi
245 ICECC_VERSION="${@icc_version(bb, d)}"
246 if [ "x${ICECC_VERSION}" = "x" ]
247 then
248 bbwarn "Cannot use icecc: could not get ICECC_VERSION"
249 return
250 fi
251
252 ICE_PATH="${@icc_path(bb, d)}"
253 if [ "x${ICE_PATH}" = "x" ]
254 then
255 bbwarn "Cannot use icecc: could not get ICE_PATH"
256 return
257 fi
258
259 ICECC_CC="${@icc_get_and_check_tool(bb, d, "gcc")}"
260 ICECC_CXX="${@icc_get_and_check_tool(bb, d, "g++")}"
261    # icc_get_and_check_tool cannot be used here because 'as' is needed without the target_sys prefix
262 ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
263 if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
264 then
265 bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
266 return
267 fi
268
269 ICE_VERSION=`$ICECC_CC -dumpversion`
270 ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
271 if [ ! -x "${ICECC_ENV_EXEC}" ]
272 then
273 bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
274 return
275 fi
276
277 ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
278    # for target recipes this should return something like:
279    # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
280    # and just "as" for native; if it returns "as" in the current directory (for whatever reason), use "as" from PATH
281 if [ "`dirname "${ICECC_AS}"`" = "." ]
282 then
283 ICECC_AS="${ICECC_WHICH_AS}"
284 fi
285
286 if [ ! -f "${ICECC_VERSION}.done" ]
287 then
288 mkdir -p "`dirname "${ICECC_VERSION}"`"
289
290 # the ICECC_VERSION generation step must be locked by a mutex
291 # in order to prevent race conditions
292 if flock -n "${ICECC_VERSION}.lock" \
293 ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
294 then
295 touch "${ICECC_VERSION}.done"
296        elif ! wait_for_file "${ICECC_VERSION}.done" 30
297        then
298            # locking failed and waiting for ${ICECC_VERSION}.done timed out
299 bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
300 return
301 fi
302 fi
303
304 export ICECC_VERSION ICECC_CC ICECC_CXX
305 export PATH="$ICE_PATH:$PATH"
306 export CCACHE_PATH="$PATH"
307
308 bbnote "Using icecc"
309}
310
311do_configure_prepend() {
312 set_icecc_env
313}
314
315do_compile_prepend() {
316 set_icecc_env
317}
318
319do_compile_kernelmodules_prepend() {
320 set_icecc_env
321}
322
323do_install_prepend() {
324 set_icecc_env
325}
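
A local.conf sketch for enabling the class; the icecc path and the blacklist values are examples only:

    INHERIT += "icecc"
    ICECC_PATH = "/usr/bin/icecc"
    # make options used while distributing, e.g. more jobs than for local builds
    ICECC_PARALLEL_MAKE = "-j 24"
    # build these locally instead of distributing their compile jobs
    ICECC_USER_PACKAGE_BL = "glibc gcc"
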
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
new file mode 100644
index 0000000000..c7e6937fa9
--- /dev/null
+++ b/meta/classes/image-live.bbclass
@@ -0,0 +1,18 @@
1
2AUTO_SYSLINUXCFG = "1"
3INITRD_IMAGE ?= "core-image-minimal-initramfs"
4INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
5SYSLINUX_ROOT = "root=/dev/ram0 "
6SYSLINUX_TIMEOUT ?= "10"
7SYSLINUX_LABELS ?= "boot install"
8LABELS_append = " ${SYSLINUX_LABELS} "
9
10ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
11
12do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs"
13do_bootimg[depends] += "${PN}:do_rootfs"
14
15inherit bootimg
16
17IMAGE_TYPEDEP_live = "ext3"
18IMAGE_TYPES_MASKED += "live"
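
This class is not normally inherited directly; as a sketch, requesting the live type in local.conf or an image recipe is enough, and image.bbclass pulls the class in through its build_live() helper:

    IMAGE_FSTYPES += "live"
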
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
new file mode 100644
index 0000000000..11f082b373
--- /dev/null
+++ b/meta/classes/image-mklibs.bbclass
@@ -0,0 +1,71 @@
1do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
4
5mklibs_optimize_image_doit() {
6 rm -rf ${WORKDIR}/mklibs
7 mkdir -p ${WORKDIR}/mklibs/dest
8 cd ${IMAGE_ROOTFS}
9 du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
10 for i in `find .`; do file $i; done \
11 | grep ELF \
12 | grep "LSB *executable" \
13 | grep "dynamically linked" \
14 | sed "s/:.*//" \
15 | sed "s+^\./++" \
16 > ${WORKDIR}/mklibs/executables.list
17
18 case ${TARGET_ARCH} in
19 powerpc | mips | microblaze )
20 dynamic_loader="${base_libdir}/ld.so.1"
21 ;;
22 powerpc64)
23 dynamic_loader="${base_libdir}/ld64.so.1"
24 ;;
25 x86_64)
26 dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
27 ;;
28 i586 )
29 dynamic_loader="${base_libdir}/ld-linux.so.2"
30 ;;
31 arm )
32 dynamic_loader="${base_libdir}/ld-linux.so.3"
33 ;;
34 * )
35 dynamic_loader="/unknown_dynamic_linker"
36 ;;
37 esac
38
39 mklibs -v \
40 --ldlib ${dynamic_loader} \
41 --libdir ${baselib} \
42 --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
43 --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
44 --root ${IMAGE_ROOTFS} \
45 --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
46 -d ${WORKDIR}/mklibs/dest \
47 `cat ${WORKDIR}/mklibs/executables.list`
48
49 cd ${WORKDIR}/mklibs/dest
50 for i in *
51 do
52 cp $i `find ${IMAGE_ROOTFS} -name $i`
53 done
54
55 cd ${IMAGE_ROOTFS}
56 du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
57
58 echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
59 echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
60}
61
62mklibs_optimize_image() {
63 for img in ${MKLIBS_OPTIMIZED_IMAGES}
64 do
65 if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
66 then
67 mklibs_optimize_image_doit
68 break
69 fi
70 done
71}
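
A sketch of enabling mklibs optimization from a poky-style local.conf; the image name is illustrative:

    USER_CLASSES += "image-mklibs"
    # optimize only the named image, or "all" for every image
    MKLIBS_OPTIMIZED_IMAGES = "core-image-minimal"
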
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
new file mode 100644
index 0000000000..d4bb3aec39
--- /dev/null
+++ b/meta/classes/image-prelink.bbclass
@@ -0,0 +1,33 @@
1do_rootfs[depends] += "prelink-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "prelink_image; "
4
5prelink_image () {
6# export PSEUDO_DEBUG=4
7# /bin/env | /bin/grep PSEUDO
8# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
9# echo "LD_PRELOAD=$LD_PRELOAD"
10
11 pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
12 echo "Size before prelinking $pre_prelink_size."
13
14 # We need a prelink conf on the filesystem, add one if it's missing
15 if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
16 cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \
17 ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
18 dummy_prelink_conf=true;
19 else
20 dummy_prelink_conf=false;
21 fi
22
23 # prelink!
24 ${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf
25
26 # Remove the prelink.conf if we had to add it.
27 if [ "$dummy_prelink_conf" = "true" ]; then
28 rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
29 fi
30
31    post_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
32    echo "Size after prelinking $post_prelink_size."
33}
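
As with image-mklibs above, enabling prelinking for all images is a one-line sketch in a poky-style local.conf:

    USER_CLASSES += "image-prelink"
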
diff --git a/meta/classes/image-swab.bbclass b/meta/classes/image-swab.bbclass
new file mode 100644
index 0000000000..124a090605
--- /dev/null
+++ b/meta/classes/image-swab.bbclass
@@ -0,0 +1,94 @@
1HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
2SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
3SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
4TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
5TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
6
7SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
8BB_DEFAULT_TASK = "generate_swabber_report"
9
10# Several recipes don't build with parallel make when run under strace.
11# Ideally these should be fixed but as a temporary measure we disable
12# parallel builds for troublesome recipes.
13PARALLEL_MAKE_pn-openssl = ""
14PARALLEL_MAKE_pn-eglibc = ""
15PARALLEL_MAKE_pn-glib-2.0 = ""
16PARALLEL_MAKE_pn-libxml2 = ""
17PARALLEL_MAKE_pn-readline = ""
18PARALLEL_MAKE_pn-util-linux = ""
19PARALLEL_MAKE_pn-binutils = ""
20PARALLEL_MAKE_pn-bison = ""
21PARALLEL_MAKE_pn-cmake = ""
22PARALLEL_MAKE_pn-elfutils = ""
23PARALLEL_MAKE_pn-gcc = ""
24PARALLEL_MAKE_pn-gcc-runtime = ""
25PARALLEL_MAKE_pn-m4 = ""
26PARALLEL_MAKE_pn-opkg = ""
27PARALLEL_MAKE_pn-pkgconfig = ""
28PARALLEL_MAKE_pn-prelink = ""
29PARALLEL_MAKE_pn-rpm = ""
30PARALLEL_MAKE_pn-tcl = ""
31PARALLEL_MAKE_pn-beecrypt = ""
32PARALLEL_MAKE_pn-curl = ""
33PARALLEL_MAKE_pn-gmp = ""
34PARALLEL_MAKE_pn-libmpc = ""
35PARALLEL_MAKE_pn-libxslt = ""
36PARALLEL_MAKE_pn-lzo = ""
37PARALLEL_MAKE_pn-popt = ""
38PARALLEL_MAKE_pn-linux-wrs = ""
39PARALLEL_MAKE_pn-libgcrypt = ""
40PARALLEL_MAKE_pn-gpgme = ""
41PARALLEL_MAKE_pn-udev = ""
42PARALLEL_MAKE_pn-gnutls = ""
43
44python() {
45    # NOTE: It might be useful to detect host infection on native and cross
46    # packages, but since it turns out to be pretty hard to do this for all
47    # native and cross packages which aren't swabber-native or one of its
48    # dependencies, they have been ignored for now...
49 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
50 deps = (d.getVarFlag('do_setscene', 'depends') or "").split()
51 deps.append('strace-native:do_populate_sysroot')
52 d.setVarFlag('do_setscene', 'depends', " ".join(deps))
53 logdir = d.expand("${TRACE_LOGDIR}")
54 bb.utils.mkdirhier(logdir)
55 else:
56 d.setVar('STRACEFUNC', '')
57}
58
59STRACEPID = "${@os.getpid()}"
60STRACEFUNC = "imageswab_attachstrace"
61
62do_configure[prefuncs] += "${STRACEFUNC}"
63do_compile[prefuncs] += "${STRACEFUNC}"
64
65imageswab_attachstrace () {
66 STRACE=`which strace`
67
68 if [ -x "$STRACE" ]; then
69 swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
70 fi
71}
72
73do_generate_swabber_report () {
74
75 update_distro ${HOST_DATA}
76
77 # Swabber can't create the directory for us
78 mkdir -p ${SWABBER_REPORT}
79
80 REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
81
82 if [ `which ccache` ] ; then
83 CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
84 fi
85
86 if [ "$(ls -A ${HOST_DATA})" ]; then
87 echo "Generating swabber report"
88 swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
89 else
90 echo "No host data, cannot generate swabber report."
91 fi
92}
93addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
94do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"
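
A sketch of enabling host-contamination detection globally:

    INHERIT += "image-swab"

With BB_DEFAULT_TASK redirected as above, a plain bitbake run of an image ends in generate_swabber_report, and the reports land under ${LOG_DIR}/swabber/.
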
diff --git a/meta/classes/image-vmdk.bbclass b/meta/classes/image-vmdk.bbclass
new file mode 100644
index 0000000000..fac5f71833
--- /dev/null
+++ b/meta/classes/image-vmdk.bbclass
@@ -0,0 +1,35 @@
1
2#NOISO = "1"
3
4SYSLINUX_ROOT = "root=/dev/hda2 "
5SYSLINUX_PROMPT ?= "0"
6SYSLINUX_TIMEOUT ?= "1"
7SYSLINUX_LABELS = "boot"
8LABELS_append = " ${SYSLINUX_LABELS} "
9
10# need to define the dependency and the ROOTFS for directdisk
11do_bootdirectdisk[depends] += "${PN}:do_rootfs"
12ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
13
14# creating VMDK relies on having a live hddimg so ensure we
15# inherit it here.
16#inherit image-live
17inherit boot-directdisk
18
19IMAGE_TYPEDEP_vmdk = "ext3"
20IMAGE_TYPES_MASKED += "vmdk"
21
22create_vmdk_image () {
23 qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk
24 ln -sf ${IMAGE_NAME}.vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vmdk
25}
26
27python do_vmdkimg() {
28 bb.build.exec_func('create_vmdk_image', d)
29}
30
31#addtask vmdkimg after do_bootimg before do_build
32addtask vmdkimg after do_bootdirectdisk before do_build
33
34do_vmdkimg[depends] += "qemu-native:do_populate_sysroot"
35
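
A usage sketch; requesting the type from local.conf (the image recipe name is illustrative):

    IMAGE_FSTYPES += "vmdk"

Building core-image-minimal then converts the .hdddirect output with qemu-img and deploys a .vmdk next to it.
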
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
new file mode 100644
index 0000000000..79de5a2cae
--- /dev/null
+++ b/meta/classes/image.bbclass
@@ -0,0 +1,408 @@
1inherit rootfs_${IMAGE_PKGTYPE}
2
3inherit populate_sdk_base
4
5TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
6TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
7POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
8
9inherit gzipnative
10
11LICENSE = "MIT"
12PACKAGES = ""
13DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross"
14RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
15RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
16
17INHIBIT_DEFAULT_DEPS = "1"
18
19TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
20inherit ${TESTIMAGECLASS}
21
22# IMAGE_FEATURES may contain any available package group
23IMAGE_FEATURES ?= ""
24IMAGE_FEATURES[type] = "list"
25IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs"
26
27# rootfs bootstrap install
28ROOTFS_BOOTSTRAP_INSTALL = "${@base_contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
29
30# packages to install from features
31FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
32FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
33
34# Define some very basic feature package groups
35FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
36SPLASH ?= "psplash"
37FEATURE_PACKAGES_splash = "${SPLASH}"
38
39IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
40
41def check_image_features(d):
42 valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split()
43 valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
44 for var in d:
45 if var.startswith("PACKAGE_GROUP_"):
46 bb.warn("PACKAGE_GROUP is deprecated, please use FEATURE_PACKAGES instead")
47 valid_features.append(var[14:])
48 elif var.startswith("FEATURE_PACKAGES_"):
49 valid_features.append(var[17:])
50 valid_features.sort()
51
52 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
53 for feature in features:
54 if feature not in valid_features:
55 bb.fatal("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
56
57IMAGE_INSTALL ?= ""
58IMAGE_INSTALL[type] = "list"
59export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
60PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
61
62# Images are generally built explicitly, do not need to be part of world.
63EXCLUDE_FROM_WORLD = "1"
64
65USE_DEVFS ?= "1"
66
67PID = "${@os.getpid()}"
68
69PACKAGE_ARCH = "${MACHINE_ARCH}"
70
71LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
72LDCONFIGDEPEND_libc-uclibc = ""
73
74do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
75do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
76do_rootfs[recrdeptask] += "do_packagedata"
77do_rootfs[vardeps] += "BAD_RECOMMENDATIONS NO_RECOMMENDATIONS"
78
79do_build[depends] += "virtual/kernel:do_deploy"
80
81def build_live(d):
82 if base_contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg
83 d.setVar('NOISO', base_contains('IMAGE_FSTYPES', "iso", "0", "1", d))
84 d.setVar('NOHDD', base_contains('IMAGE_FSTYPES', "hddimg", "0", "1", d))
85 if d.getVar('NOISO', True) == "0" or d.getVar('NOHDD', True) == "0":
86 return "image-live"
87 return ""
88 return "image-live"
89
90IMAGE_TYPE_live = "${@build_live(d)}"
91
92inherit ${IMAGE_TYPE_live}
93IMAGE_TYPE_vmdk = '${@base_contains("IMAGE_FSTYPES", "vmdk", "image-vmdk", "", d)}'
94inherit ${IMAGE_TYPE_vmdk}
95
96python () {
97 deps = " " + imagetypes_getdepends(d)
98 d.appendVarFlag('do_rootfs', 'depends', deps)
99
100 deps = ""
101 for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
102 deps += " %s:do_populate_sysroot" % dep
103 d.appendVarFlag('do_build', 'depends', deps)
104
105    # Process IMAGE_FEATURES; this must be done before runtime_mapping_rename
106    # Check for replaced image features
107 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
108 remain_features = features.copy()
109 for feature in features:
110 replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split())
111 remain_features -= replaces
112
113    # Check for conflicting image features
114 for feature in remain_features:
115 conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split())
116 temp = conflicts & remain_features
117 if temp:
118 bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
119
120 d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features)))
121
122 # Ensure we have the vendor list for complementary package handling
123 ml_vendor_list = ""
124 multilibs = d.getVar('MULTILIBS', True) or ""
125 for ext in multilibs.split():
126 eext = ext.split(':')
127 if len(eext) > 1 and eext[0] == 'multilib':
128 localdata = bb.data.createCopy(d)
129 vendor = localdata.getVar("TARGET_VENDOR_virtclass-multilib-" + eext[1], False)
130 ml_vendor_list += " " + vendor
131 d.setVar('MULTILIB_VENDORS', ml_vendor_list)
132
133 check_image_features(d)
134 initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
135 if initramfs_image != "":
136 d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True))
137 d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_rootfs" % initramfs_image)
138}
139
140IMAGE_CLASSES ?= "image_types"
141inherit ${IMAGE_CLASSES}
142
143IMAGE_POSTPROCESS_COMMAND ?= ""
144MACHINE_POSTPROCESS_COMMAND ?= ""
145# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
146ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "ssh_allow_empty_password; ", "",d)}'
147# Enable postinst logging if debug-tweaks is enabled
148ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "postinst_enable_logging; ", "",d)}'
149# Write manifest
150IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
151ROOTFS_POSTPROCESS_COMMAND =+ "write_image_manifest ; "
152# Set default postinst log file
153POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
154# Set default target for systemd images
155SYSTEMD_DEFAULT_TARGET ?= '${@base_contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
156ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
157
158# some default locales
159IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
160
161LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
162
163PSEUDO_PASSWD = "${IMAGE_ROOTFS}"
164
165do_rootfs[dirs] = "${TOPDIR}"
166do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
167do_rootfs[cleandirs] += "${S}"
168
169# All of the rootfs steps must run from inside this one task, rather than as
170# separate tasks, so that we have a single fakeroot context for the whole process.
171do_rootfs[umask] = "022"
172
173# A hook function to support read-only-rootfs IMAGE_FEATURES
174# Currently, it only supports the sysvinit system.
175read_only_rootfs_hook () {
176 if ${@base_contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
177 # Tweak the mount option and fs_passno for rootfs in fstab
178 sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
179 # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
180 if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
181 sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
182 fi
183 # Run populate-volatile.sh at rootfs time to set up basic files
184 # and directories to support read-only rootfs.
185 if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
186 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
187 fi
188 # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
189 # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
190 # and the keys under /var/run/ssh.
191 if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
192 if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
193 echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
194 echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
195 else
196 echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
197 echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
198 fi
199 fi
200 fi
201}
202
203PACKAGE_EXCLUDE ??= ""
204PACKAGE_EXCLUDE[type] = "list"
205
206python rootfs_process_ignore() {
207 excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
208 inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
209 inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
210
211 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
212 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
213
214 for pkg in excl_pkgs:
215 if pkg in inst_pkgs:
216 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
217 inst_pkgs.remove(pkg)
218
219 if pkg in inst_attempt_pkgs:
220            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_attempt_pkgs))
221 inst_attempt_pkgs.remove(pkg)
222
223 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
224 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
225}
226do_rootfs[prefuncs] += "rootfs_process_ignore"
227
228# We have to delay the runtime_mapping_rename until just before rootfs runs;
229# otherwise, the multilib renaming could step in and squash any fixups that
230# may have occurred.
231python rootfs_runtime_mapping() {
232 pn = d.getVar('PN', True)
233 runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
234 runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
235 runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
236}
237do_rootfs[prefuncs] += "rootfs_runtime_mapping"
238
239fakeroot python do_rootfs () {
240 from oe.rootfs import create_rootfs
241 from oe.image import create_image
242 from oe.manifest import create_manifest
243
244 # generate the initial manifest
245 create_manifest(d)
246
247 # generate rootfs
248 create_rootfs(d)
249
250 # generate final images
251 create_image(d)
252}
253
254insert_feed_uris () {
255
256 echo "Building feeds for [${DISTRO}].."
257
258 for line in ${FEED_URIS}
259 do
260 # strip leading and trailing spaces/tabs, then split into name and uri
261 line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
262 feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
263 feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
264
265 echo "Added $feed_name feed with URL $feed_uri"
266
267 # insert new feed-sources
268 echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
269 done
270}
271
272MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
273MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
274MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
275
276# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
277zap_empty_root_password () {
278 if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
279 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
280 elif [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
281 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
282 fi
283}
284
285# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
286ssh_allow_empty_password () {
287 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
288 sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
289 sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
290 fi
291
292 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
293 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
294 if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
295 sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
296 fi
297 else
298 printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
299 fi
300 fi
301
302 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
303 sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
304 fi
305}
306
307# Disable DNS lookups; SSH_DISABLE_DNS_LOOKUP can be overridden to allow
308# distros to choose not to take this change
309SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
310ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
311ssh_disable_dns_lookup () {
312 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
313 sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
314 fi
315}
316
317# Enable postinst logging if debug-tweaks is enabled
318postinst_enable_logging () {
319 mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
320 echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
321 echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
322}
323
324# Modify systemd default target
325set_systemd_default_target () {
326 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
327 ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
328 fi
329}
330
331# Turn any symbolic /sbin/init link into a file
332remove_init_link () {
333 if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
334 LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
335 rm ${IMAGE_ROOTFS}/sbin/init
336 cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
337 fi
338}
339
340make_zimage_symlink_relative () {
341 if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
342 (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
343 fi
344}
345
346python write_image_manifest () {
347 from oe.rootfs import image_list_installed_packages
348 with open(d.getVar('IMAGE_MANIFEST', True), 'w+') as image_manifest:
349 image_manifest.write(image_list_installed_packages(d, 'ver'))
350}
351
352# Make login manager(s) enable automatic login.
353# Useful for devices where we do not want to log in at all (e.g. phones)
354set_image_autologin () {
355 sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin
356}
357
358# Can be used to create /etc/timestamp during image construction to give a reasonably
359# sane default time setting
360rootfs_update_timestamp () {
361 date -u +%4Y%2m%2d%2H%2M >${IMAGE_ROOTFS}/etc/timestamp
362}
363
364# Prevent X from being started
365rootfs_no_x_startup () {
366 if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
367 chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
368 fi
369}
370
371rootfs_trim_schemas () {
372 for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
373 do
374 # Need this in case no files exist
375 if [ -e $schema ]; then
376 oe-trim-schemas $schema > $schema.new
377 mv $schema.new $schema
378 fi
379 done
380}
381
382# Make any absolute links in a sysroot relative
383rootfs_sysroot_relativelinks () {
384 sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
385}
386
387do_fetch[noexec] = "1"
388do_unpack[noexec] = "1"
389do_patch[noexec] = "1"
390do_configure[noexec] = "1"
391do_compile[noexec] = "1"
392do_install[noexec] = "1"
393do_populate_sysroot[noexec] = "1"
394do_package[noexec] = "1"
395do_packagedata[noexec] = "1"
396do_package_write_ipk[noexec] = "1"
397do_package_write_deb[noexec] = "1"
398do_package_write_rpm[noexec] = "1"
399
400addtask rootfs before do_build
401# Allow the kernel to be repacked with the initramfs and boot image file as a single file
402do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
403do_bundle_initramfs[nostamp] = "1"
404do_bundle_initramfs[noexec] = "1"
405do_bundle_initramfs () {
406 :
407}
408addtask bundle_initramfs after do_rootfs
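
A minimal image recipe built on this class might look like the following sketch; the SUMMARY and package set are illustrative:

    SUMMARY = "A small console-only image"
    LICENSE = "MIT"

    IMAGE_INSTALL = "packagegroup-core-boot"
    IMAGE_FEATURES += "debug-tweaks"

    inherit image
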
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
new file mode 100644
index 0000000000..f38e3b6a11
--- /dev/null
+++ b/meta/classes/image_types.bbclass
@@ -0,0 +1,154 @@
1
2# The default alignment of the size of the rootfs is set to 1KiB. In case
3# you're using the SD card emulation of a QEMU system simulator you may
4# set this value to 2048 (2MiB alignment).
5IMAGE_ROOTFS_ALIGNMENT ?= "1"
6
7def imagetypes_getdepends(d):
8 def adddep(depstr, deps):
9 for i in (depstr or "").split():
10 if i not in deps:
11 deps.append(i)
12
13 deps = []
14 ctypes = d.getVar('COMPRESSIONTYPES', True).split()
15 for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
16 if type == "vmdk" or type == "live" or type == "iso" or type == "hddimg":
17 type = "ext3"
18 basetype = type
19 for ctype in ctypes:
20 if type.endswith("." + ctype):
21 basetype = type[:-len("." + ctype)]
22 adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
23 break
24 adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
25
26 depstr = ""
27 for dep in deps:
28 depstr += " " + dep + ":do_populate_sysroot"
29 return depstr
30
31
32XZ_COMPRESSION_LEVEL ?= "-e -6"
33XZ_INTEGRITY_CHECK ?= "crc32"
34XZ_THREADS ?= "-T 0"
35
36JFFS2_SUM_EXTRA_ARGS ?= ""
37IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 ${EXTRA_IMAGECMD}"
38
39IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}"
40
41oe_mkext234fs () {
42 fstype=$1
43 extra_imagecmd=""
44
45 if [ $# -gt 1 ]; then
46 shift
47 extra_imagecmd=$@
48 fi
49
50 # Create a sparse image block
51 dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype seek=$ROOTFS_SIZE count=0 bs=1k
52 mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype -d ${IMAGE_ROOTFS}
53}
54
55IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
56IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
57IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
58
59IMAGE_CMD_btrfs () {
60 touch ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
61 mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
62}
63
64IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
65IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
66IMAGE_CMD_tar = "tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar -C ${IMAGE_ROOTFS} ."
67
68IMAGE_CMD_cpio () {
69 (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
70 if [ ! -e ${IMAGE_ROOTFS}/init ]; then
71 mkdir -p ${WORKDIR}/cpio_append
72 touch ${WORKDIR}/cpio_append/init
73 (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
74 fi
75}
76
77ELF_KERNEL ?= "${STAGING_DIR_HOST}/usr/src/kernel/${KERNEL_IMAGETYPE}"
78ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
79
80IMAGE_CMD_elf () {
81 test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf
82 mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
83}
84IMAGE_TYPEDEP_elf = "cpio.gz"
85
86UBI_VOLNAME ?= "${MACHINE}-rootfs"
87
88IMAGE_CMD_ubi () {
89 echo \[ubifs\] > ubinize.cfg
90 echo mode=ubi >> ubinize.cfg
91 echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs >> ubinize.cfg
92 echo vol_id=0 >> ubinize.cfg
93 echo vol_type=dynamic >> ubinize.cfg
94 echo vol_name=${UBI_VOLNAME} >> ubinize.cfg
95 echo vol_flags=autoresize >> ubinize.cfg
96 mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS} && ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg
97}
98IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
99
100EXTRA_IMAGECMD = ""
101
102inherit siteinfo
103JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
104JFFS2_ERASEBLOCK ?= "0x40000"
105EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
106
107# Change these if you want default mkfs behavior (i.e. create minimal inode number)
108EXTRA_IMAGECMD_ext2 ?= "-i 4096"
109EXTRA_IMAGECMD_ext3 ?= "-i 4096"
110EXTRA_IMAGECMD_ext4 ?= "-i 4096"
111EXTRA_IMAGECMD_btrfs ?= ""
112EXTRA_IMAGECMD_elf ?= ""
113
114IMAGE_DEPENDS = ""
115IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
116IMAGE_DEPENDS_cramfs = "util-linux-native"
117IMAGE_DEPENDS_ext2 = "e2fsprogs-native"
118IMAGE_DEPENDS_ext3 = "e2fsprogs-native"
119IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
120IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
121IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
122IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
123IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
124IMAGE_DEPENDS_ubi = "mtd-utils-native"
125IMAGE_DEPENDS_ubifs = "mtd-utils-native"
126
127# This variable is available to request which values are suitable for IMAGE_FSTYPES
128IMAGE_TYPES = "jffs2 jffs2.sum cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs iso hddimg squashfs squashfs-xz ubi ubifs tar tar.gz tar.bz2 tar.xz tar.lz4 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 vmdk elf"
129
130COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum"
131COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
132COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.gz"
133COMPRESS_CMD_bz2 = "bzip2 -f -k ${IMAGE_NAME}.rootfs.${type}"
134COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.xz"
135COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.lz4"
136COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}.rootfs.${type} -o ${IMAGE_NAME}.rootfs.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
137COMPRESS_DEPENDS_lzma = "xz-native"
138COMPRESS_DEPENDS_gz = ""
139COMPRESS_DEPENDS_bz2 = ""
140COMPRESS_DEPENDS_xz = "xz-native"
141COMPRESS_DEPENDS_lz4 = "lz4-native"
142COMPRESS_DEPENDS_sum = "mtd-utils-native"
143
144RUNNABLE_IMAGE_TYPES ?= "ext2 ext3"
145RUNNABLE_MACHINE_PATTERNS ?= "qemu"
146
147DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
148
149# Use IMAGE_EXTENSION_xxx to map image type 'xxx' with real image file extension name(s) for Hob
150IMAGE_EXTENSION_live = "hddimg iso"
151
152# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
153# images that will not be built at do_rootfs time: vmdk, hddimg, iso, etc.
154IMAGE_TYPES_MASKED ?= ""
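
Defining an additional image type from a layer follows the same pattern as the built-in ones; a sketch for a hypothetical type "myfs" (mkfs.myfs and myfs-tools-native are placeholders):

    IMAGE_CMD_myfs = "mkfs.myfs ${EXTRA_IMAGECMD} ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.myfs"
    EXTRA_IMAGECMD_myfs ?= ""
    IMAGE_DEPENDS_myfs = "myfs-tools-native"
    IMAGE_TYPES += "myfs"

Selecting the new type is then just IMAGE_FSTYPES += "myfs" in the machine or local configuration.
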
diff --git a/meta/classes/image_types_uboot.bbclass b/meta/classes/image_types_uboot.bbclass
new file mode 100644
index 0000000000..07837b566c
--- /dev/null
+++ b/meta/classes/image_types_uboot.bbclass
@@ -0,0 +1,23 @@
1inherit image_types kernel-arch
2
3oe_mkimage () {
4 mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
5 -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
6}
7
8COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
9
10COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native"
11COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
12
13COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
14COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip"
15
16COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
17COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2"
18
19COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
20COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma"
21
22IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot"
23
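
A usage sketch: selecting one of these compound types works like any other image type,

    IMAGE_FSTYPES += "ext2.gz.u-boot"

which gzips the ext2 rootfs and then wraps the result with mkimage into a u-boot ramdisk image.
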
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
new file mode 100644
index 0000000000..9ce336415a
--- /dev/null
+++ b/meta/classes/insane.bbclass
@@ -0,0 +1,1005 @@
1# BB Class inspired by ebuild.sh
2#
3# This class will test files after installation for certain
4# security issues and other kinds of issues.
5#
6# Checks we do:
7# -Check the ownership and permissions
8# -Check the RUNTIME path for the $TMPDIR
9# -Check if .la files wrongly point to workdir
10# -Check if .pc files wrongly point to workdir
11# -Check if packages contain .debug directories or .so files
12# where they should be in -dev or -dbg
13# -Check if config.log contains traces of broken autoconf tests
14# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
15# into exec_prefix
16# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
17# files under exec_prefix
18
19
20PACKAGE_DEPENDS += "${QADEPENDS}"
21PACKAGEFUNCS += " do_package_qa "
22
23# unsafe-references-in-binaries requires prelink-rtld from
24# prelink-native, but we don't want this DEPENDS for -native builds
25QADEPENDS = "prelink-native"
26QADEPENDS_class-native = ""
27QADEPENDS_class-nativesdk = ""
28QA_SANE = "True"
29
30# Elect whether a given type of error is a warning or an error; they may
31# have been set by other files.
32WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
33 textrel already-stripped incompatible-license files-invalid \
34 installed-vs-shipped compile-host-path install-host-path \
35 pn-overrides infodir \
36 "
37ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
38 perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
39 split-strip packages-list pkgv-undefined var-undefined \
40 version-going-backwards \
41 "
42
43ALL_QA = "${WARN_QA} ${ERROR_QA}"
44
45UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot"
46
47#
48# dictionary for elf headers
49#
50# feel free to add and correct.
51#
52# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
53def package_qa_get_machine_dict():
54 return {
55 "darwin9" : {
56 "arm" : (40, 0, 0, True, 32),
57 },
58 "linux" : {
59 "aarch64" : (183, 0, 0, True, 64),
60 "aarch64_be" :(183, 0, 0, False, 64),
61 "arm" : (40, 97, 0, True, 32),
62 "armeb": (40, 97, 0, False, 32),
63 "powerpc": (20, 0, 0, False, 32),
64 "powerpc64": (21, 0, 0, False, 64),
65 "i386": ( 3, 0, 0, True, 32),
66 "i486": ( 3, 0, 0, True, 32),
67 "i586": ( 3, 0, 0, True, 32),
68 "i686": ( 3, 0, 0, True, 32),
69 "x86_64": (62, 0, 0, True, 64),
70 "ia64": (50, 0, 0, True, 64),
71 "alpha": (36902, 0, 0, True, 64),
72 "hppa": (15, 3, 0, False, 32),
73 "m68k": ( 4, 0, 0, False, 32),
74 "mips": ( 8, 0, 0, False, 32),
75 "mipsel": ( 8, 0, 0, True, 32),
76 "mips64": ( 8, 0, 0, False, 64),
77 "mips64el": ( 8, 0, 0, True, 64),
78 "s390": (22, 0, 0, False, 32),
79 "sh4": (42, 0, 0, True, 32),
80 "sparc": ( 2, 0, 0, False, 32),
81 "microblaze": (189, 0, 0, False, 32),
82 "microblazeel":(189, 0, 0, True, 32),
83 },
84 "linux-uclibc" : {
85 "arm" : ( 40, 97, 0, True, 32),
86 "armeb": ( 40, 97, 0, False, 32),
87 "powerpc": ( 20, 0, 0, False, 32),
88 "i386": ( 3, 0, 0, True, 32),
89 "i486": ( 3, 0, 0, True, 32),
90 "i586": ( 3, 0, 0, True, 32),
91 "i686": ( 3, 0, 0, True, 32),
92 "x86_64": ( 62, 0, 0, True, 64),
93 "mips": ( 8, 0, 0, False, 32),
94 "mipsel": ( 8, 0, 0, True, 32),
95 "mips64": ( 8, 0, 0, False, 64),
96 "mips64el": ( 8, 0, 0, True, 64),
97 "avr32": (6317, 0, 0, False, 32),
98 "sh4": (42, 0, 0, True, 32),
99
100 },
101 "uclinux-uclibc" : {
102 "bfin": ( 106, 0, 0, True, 32),
103 },
104 "linux-gnueabi" : {
105 "arm" : (40, 0, 0, True, 32),
106 "armeb" : (40, 0, 0, False, 32),
107 },
108 "linux-uclibceabi" : {
109 "arm" : (40, 0, 0, True, 32),
110 "armeb" : (40, 0, 0, False, 32),
111 },
112 "linux-gnuspe" : {
113 "powerpc": (20, 0, 0, False, 32),
114 },
115 "linux-uclibcspe" : {
116 "powerpc": (20, 0, 0, False, 32),
117 },
118 "linux-gnu" : {
119 "powerpc": (20, 0, 0, False, 32),
120 "sh4": (42, 0, 0, True, 32),
121 },
122 "linux-gnux32" : {
123 "x86_64": (62, 0, 0, True, 32),
124 },
125 "linux-gnun32" : {
126 "mips64": ( 8, 0, 0, False, 32),
127 "mips64el": ( 8, 0, 0, True, 32),
128 },
129 }
130
131
132def package_qa_clean_path(path,d):
133 """ Remove the common prefix from the path. In this case it is the TMPDIR"""
134 return path.replace(d.getVar('TMPDIR',True),"")
135
136def package_qa_write_error(error, d):
137 logfile = d.getVar('QA_LOGFILE', True)
138 if logfile:
139 p = d.getVar('P', True)
140        f = open(logfile, "a+")
141 print >> f, "%s: %s" % (p, error)
142 f.close()
143
144def package_qa_handle_error(error_class, error_msg, d):
145 package_qa_write_error(error_msg, d)
146 if error_class in (d.getVar("ERROR_QA", True) or "").split():
147 bb.error("QA Issue: %s" % error_msg)
148 d.setVar("QA_SANE", False)
149 return False
150 elif error_class in (d.getVar("WARN_QA", True) or "").split():
151 bb.warn("QA Issue: %s" % error_msg)
152 else:
153 bb.note("QA Issue: %s" % error_msg)
154 return True
155
156QAPATHTEST[libexec] = "package_qa_check_libexec"
157def package_qa_check_libexec(path,name, d, elf, messages):
158
159 # Skip the case where the default is explicitly /usr/libexec
160 libexec = d.getVar('libexecdir', True)
161 if libexec == "/usr/libexec":
162 return True
163
164 if 'libexec' in path.split(os.path.sep):
165 messages.append("%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
166 return False
167
168 return True
169
170QAPATHTEST[rpaths] = "package_qa_check_rpath"
171def package_qa_check_rpath(file,name, d, elf, messages):
172 """
173 Check for dangerous RPATHs
174 """
175 if not elf:
176 return
177
178 if os.path.islink(file):
179 return
180
181 bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)]
182
183 phdrs = elf.run_objdump("-p", d)
184
185 import re
186    rpath_re = re.compile(r"\s+RPATH\s+(.*)")
187 for line in phdrs.split("\n"):
188 m = rpath_re.match(line)
189 if m:
190 rpath = m.group(1)
191 for dir in bad_dirs:
192 if dir in rpath:
193 messages.append("package %s contains bad RPATH %s in file %s" % (name, rpath, file))
194
195QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
196def package_qa_check_useless_rpaths(file, name, d, elf, messages):
197 """
198 Check for RPATHs that are useless but not dangerous
199 """
200 def rpath_eq(a, b):
201 return os.path.normpath(a) == os.path.normpath(b)
202
203 if not elf:
204 return
205
206 if os.path.islink(file):
207 return
208
209 libdir = d.getVar("libdir", True)
210 base_libdir = d.getVar("base_libdir", True)
211
212 phdrs = elf.run_objdump("-p", d)
213
214 import re
215    rpath_re = re.compile(r"\s+RPATH\s+(.*)")
216 for line in phdrs.split("\n"):
217 m = rpath_re.match(line)
218 if m:
219 rpath = m.group(1)
220 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
221 # The dynamic linker searches both these places anyway. There is no point in
222 # looking there again.
223 messages.append("%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
224
225QAPATHTEST[dev-so] = "package_qa_check_dev"
226def package_qa_check_dev(path, name, d, elf, messages):
227 """
228 Check for ".so" library symlinks in non-dev packages
229 """
230
231 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
232 messages.append("non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
233 (name, package_qa_clean_path(path,d)))
234
235QAPATHTEST[staticdev] = "package_qa_check_staticdev"
236def package_qa_check_staticdev(path, name, d, elf, messages):
237 """
238 Check for ".a" library in non-staticdev packages
239    There are a number of exceptions to this rule: -pic packages can contain
240    static libraries, the _nonshared.a files belong with their -dev packages, and
241    libgcc.a and libgcov.a will be skipped in their packages
242 """
243
244 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
245 messages.append("non -staticdev package contains static .a library: %s path '%s'" % \
246 (name, package_qa_clean_path(path,d)))
247
248def package_qa_check_libdir(d):
249 """
250 Check for wrong library installation paths. For instance, catch
251 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
252 installing in /usr/lib64 when ${libdir}="/usr/lib"
253 """
254 import re
255
256 pkgdest = d.getVar('PKGDEST', True)
257 base_libdir = d.getVar("base_libdir",True) + os.sep
258 libdir = d.getVar("libdir", True) + os.sep
259 exec_prefix = d.getVar("exec_prefix", True) + os.sep
260
261 messages = []
262
263 lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
264 exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
265
266 for root, dirs, files in os.walk(pkgdest):
267 if root == pkgdest:
268 # Skip subdirectories for any packages with libdir in INSANE_SKIP
269 skippackages = []
270 for package in dirs:
271 if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
272 bb.note("Package %s skipping libdir QA test" % (package))
273 skippackages.append(package)
274 for package in skippackages:
275 dirs.remove(package)
276 for file in files:
277 full_path = os.path.join(root, file)
278 rel_path = os.path.relpath(full_path, pkgdest)
279 if os.sep in rel_path:
280 package, rel_path = rel_path.split(os.sep, 1)
281 rel_path = os.sep + rel_path
282 if lib_re.match(rel_path):
283 if base_libdir not in rel_path:
284 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
285 if exec_re.match(rel_path):
286 if libdir not in rel_path:
287 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
288
289 if messages:
290 package_qa_handle_error("libdir", "\n".join(messages), d)
291
292QAPATHTEST[debug-files] = "package_qa_check_dbg"
293def package_qa_check_dbg(path, name, d, elf, messages):
294 """
295 Check for ".debug" files or directories outside of the dbg package
296 """
297
298 if not "-dbg" in name and not "-ptest" in name:
299 if '.debug' in path.split(os.path.sep):
300 messages.append("non debug package contains .debug directory: %s path %s" % \
301 (name, package_qa_clean_path(path,d)))
302
303QAPATHTEST[perms] = "package_qa_check_perm"
304def package_qa_check_perm(path, name, d, elf, messages):
305 """
306 Check the permission of files (currently a placeholder; no checks are implemented yet)
307 """
308 return
309
310QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
311def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
312 """
313 Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
314 """
315 if unsafe_references_skippable(path, name, d):
316 return
317
318 if elf:
319 import subprocess as sub
320 pn = d.getVar('PN', True)
321
322 exec_prefix = d.getVar('exec_prefix', True)
323 sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
324 sysroot_path_usr = sysroot_path + exec_prefix
325
326 try:
327 ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
328 except bb.process.CmdError:
329 error_msg = pn + ": prelink-rtld aborted when processing %s" % path
330 package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
331 return False
332
333 if sysroot_path_usr in ldd_output:
334 ldd_output = ldd_output.replace(sysroot_path, "")
335
336 pkgdest = d.getVar('PKGDEST', True)
337 packages = d.getVar('PACKAGES', True)
338
339 for package in packages.split():
340 short_path = path.replace('%s/%s' % (pkgdest, package), "", 1)
341 if (short_path != path):
342 break
343
344 base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix)
345 for line in ldd_output.split('\n'):
346 if exec_prefix in line:
347 error_msg = "%s: %s" % (base_err, line.strip())
348 package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
349
350 return False
351
352QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
353def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
354 """
355 Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
356 """
357 if unsafe_references_skippable(path, name, d):
358 return
359
360 if not elf:
361 import stat
362 import subprocess
363 pn = d.getVar('PN', True)
364
365 # Ensure we're checking an executable script
366 statinfo = os.stat(path)
367 if bool(statinfo.st_mode & stat.S_IXUSR):
368 # grep shell scripts for possible references to /exec_prefix/
369 exec_prefix = d.getVar('exec_prefix', True)
370 statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path)
371 if subprocess.call(statement, shell=True) == 0:
372 error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
373 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
374 error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
375 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
376
377def unsafe_references_skippable(path, name, d):
378 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
379 return True
380
381 if "-dbg" in name or "-dev" in name:
382 return True
383
384 # Other package names to skip:
385 if name.startswith("kernel-module-"):
386 return True
387
388 # Skip symlinks
389 if os.path.islink(path):
390 return True
391
392 # Skip unusual rootfs layouts which make these tests irrelevant
393 exec_prefix = d.getVar('exec_prefix', True)
394 if exec_prefix == "":
395 return True
396
397 pkgdest = d.getVar('PKGDEST', True)
398 pkgdest = pkgdest + "/" + name
399 pkgdest = os.path.abspath(pkgdest)
400 base_bindir = pkgdest + d.getVar('base_bindir', True)
401 base_sbindir = pkgdest + d.getVar('base_sbindir', True)
402 base_libdir = pkgdest + d.getVar('base_libdir', True)
403 bindir = pkgdest + d.getVar('bindir', True)
404 sbindir = pkgdest + d.getVar('sbindir', True)
405 libdir = pkgdest + d.getVar('libdir', True)
406
407 if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
408 return True
409
410 # Skip files not in base_[bindir|sbindir|libdir]
411 path = os.path.abspath(path)
412 if not (base_bindir in path or base_sbindir in path or base_libdir in path):
413 return True
414
415 return False
416
417QAPATHTEST[arch] = "package_qa_check_arch"
418def package_qa_check_arch(path, name, d, elf, messages):
419 """
420 Check if archs are compatible
421 """
422 if not elf:
423 return
424
425 target_os = d.getVar('TARGET_OS', True)
426 target_arch = d.getVar('TARGET_ARCH', True)
427 provides = d.getVar('PROVIDES', True)
428 bpn = d.getVar('BPN', True)
429
430 # FIXME: Cross packages confuse this check, so just skip them
431 for s in ['cross', 'nativesdk', 'cross-canadian']:
432 if bb.data.inherits_class(s, d):
433 return
434
435 # avoid following links to /usr/bin (e.g. on udev builds)
436 # we will check the files pointed to anyway...
437 if os.path.islink(path):
438 return
439
440 # if this throws an exception, fix the dictionary in package_qa_get_machine_dict()
441 (machine, osabi, abiversion, littleendian, bits) \
442 = package_qa_get_machine_dict()[target_os][target_arch]
443
444 # Check the architecture and endianness of the binary
445 if not ((machine == elf.machine()) or \
446 ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32"))):
447 messages.append("Architecture did not match (%d to %d) on %s" % \
448 (machine, elf.machine(), package_qa_clean_path(path,d)))
449 elif not ((bits == elf.abiSize()) or \
450 ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32"))):
451 messages.append("Bit size did not match (%d to %d) %s on %s" % \
452 (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
453 elif littleendian != elf.isLittleEndian():
454 messages.append("Endianness did not match (%d to %d) on %s" % \
455 (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
456
457QAPATHTEST[desktop] = "package_qa_check_desktop"
458def package_qa_check_desktop(path, name, d, elf, messages):
459 """
460 Run all desktop files through desktop-file-validate.
461 """
462 if path.endswith(".desktop"):
463 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
464 output = os.popen("%s %s" % (desktop_file_validate, path))
465 # This only produces output on errors
466 for l in output:
467 messages.append("Desktop file issue: " + l.strip())
468
469QAPATHTEST[textrel] = "package_qa_textrel"
470def package_qa_textrel(path, name, d, elf, messages):
471 """
472 Check if the binary contains relocations in .text
473 """
474
475 if not elf:
476 return
477
478 if os.path.islink(path):
479 return
480
481 phdrs = elf.run_objdump("-p", d)
482 sane = True
483
484 import re
485 textrel_re = re.compile(r"\s+TEXTREL\s+")
486 for line in phdrs.split("\n"):
487 if textrel_re.match(line):
488 sane = False
489
490 if not sane:
491 messages.append("ELF binary '%s' has relocations in .text" % path)
492
493QAPATHTEST[ldflags] = "package_qa_hash_style"
494def package_qa_hash_style(path, name, d, elf, messages):
495 """
496 Check if the binary has the right hash style...
497 """
498
499 if not elf:
500 return
501
502 if os.path.islink(path):
503 return
504
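    # Only enforce GNU_HASH when LDFLAGS actually requests it via
    # --hash-style=gnu or --hash-style=both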
505 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
506 if not gnu_hash:
507 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
508 if not gnu_hash:
509 return
510
511 sane = False
512 has_syms = False
513
514 phdrs = elf.run_objdump("-p", d)
515
516 # If this binary has symbols, we expect it to have GNU_HASH too.
517 for line in phdrs.split("\n"):
518 if "SYMTAB" in line:
519 has_syms = True
520 if "GNU_HASH" in line:
521 sane = True
522 if "[mips32]" in line or "[mips64]" in line:
523 sane = True
524
525 if has_syms and not sane:
526 messages.append("No GNU_HASH in the elf binary: '%s'" % path)
527
528
529QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
530def package_qa_check_buildpaths(path, name, d, elf, messages):
531 """
532 Check for build paths inside target files and error if not found in the whitelist
533 """
534 # Ignore .debug files, not interesting
535 if path.find(".debug") != -1:
536 return
537
538 # Ignore symlinks
539 if os.path.islink(path):
540 return
541
542 tmpdir = d.getVar('TMPDIR', True)
543 with open(path) as f:
544 file_content = f.read()
545 if tmpdir in file_content:
546 messages.append("File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
547
548
549QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
550def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
551 """
552 Check that all packages containing Xorg drivers have ABI dependencies
553 """
554
555 # Skip dev, dbg or nativesdk packages
556 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
557 return
558
559 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
560 if driverdir in path and path.endswith(".so"):
561 mlprefix = d.getVar('MLPREFIX', True) or ''
562 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
563 if rdep.startswith("%sxorg-abi-" % mlprefix):
564 return
565 messages.append("Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
566
567QAPATHTEST[infodir] = "package_qa_check_infodir"
568def package_qa_check_infodir(path, name, d, elf, messages):
569 """
570 Check that /usr/share/info/dir isn't shipped in a particular package
571 """
572 infodir = d.expand("${infodir}/dir")
573
574 if infodir in path:
575 messages.append("The /usr/share/info/dir file is not meant to be shipped in a particular package.")
576
577QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
578def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
579 """
580 Check that the package doesn't contain any absolute symlinks to the sysroot.
581 """
582 if os.path.islink(path):
583 target = os.readlink(path)
584 if os.path.isabs(target):
585 tmpdir = d.getVar('TMPDIR', True)
586 if target.startswith(tmpdir):
587 trimmed = path.replace(os.path.join(d.getVar("PKGDEST", True), name), "")
588 messages.append("Symlink %s in %s points to TMPDIR" % (trimmed, name))
589
590def package_qa_check_license(workdir, d):
591 """
592 Check for changes in the license files
593 """
594 import tempfile
595 sane = True
596
597 lic_files = d.getVar('LIC_FILES_CHKSUM', True)
598 lic = d.getVar('LICENSE', True)
599 pn = d.getVar('PN', True)
600
601 if lic == "CLOSED":
602 return True
603
604 if not lic_files:
605 bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
606 return False
607
608 srcdir = d.getVar('S', True)
609
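    # Each entry takes the form file://<path>;md5=<sum>, optionally with
    # beginline=<n> and/or endline=<n> to restrict the region checksummed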
610 for url in lic_files.split():
611 try:
612 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
613 except bb.fetch.MalformedUrl:
614 raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
615 srclicfile = os.path.join(srcdir, path)
616 if not os.path.isfile(srclicfile):
617 raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile)
618
619 recipemd5 = parm.get('md5', '')
620 beginline, endline = 0, 0
621 if 'beginline' in parm:
622 beginline = int(parm['beginline'])
623 if 'endline' in parm:
624 endline = int(parm['endline'])
625
626 if (not beginline) and (not endline):
627 md5chksum = bb.utils.md5_file(srclicfile)
628 else:
629 fi = open(srclicfile, 'rb')
630 fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
631 tmplicfile = fo.name
632 lineno = 0
633 linesout = 0
634 for line in fi:
635 lineno += 1
636 if (lineno >= beginline):
637 if ((lineno <= endline) or not endline):
638 fo.write(line)
639 linesout += 1
640 else:
641 break
642 fo.flush()
643 fo.close()
644 fi.close()
645 md5chksum = bb.utils.md5_file(tmplicfile)
646 os.unlink(tmplicfile)
647
648 if recipemd5 == md5chksum:
649 bb.note (pn + ": md5 checksum matched for ", url)
650 else:
651 if recipemd5:
652 bb.error(pn + ": md5 data is not matching for ", url)
653 bb.error(pn + ": The new md5 checksum is ", md5chksum)
654 if beginline:
655 if endline:
656 srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
657 else:
658 srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
659 elif endline:
660 srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
661 else:
662 srcfiledesc = srclicfile
663 bb.error(pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic))
664 else:
665 bb.error(pn + ": md5 checksum is not specified for ", url)
666 bb.error(pn + ": The md5 checksum is ", md5chksum)
667 sane = False
668
669 return sane
670
671def package_qa_check_staged(path, d):
672 """
673 Check staged la and pc files for sanity
674 e.g. references to WORKDIR or TMPDIR left behind in .la and .pc files
675
676 As this is run after every stage we should be able
677 to find the one responsible for the errors easily even
678 if we look at every .pc and .la file
679 """
680
681 sane = True
682 tmpdir = d.getVar('TMPDIR', True)
683 workdir = os.path.join(tmpdir, "work")
684
685 installed = "installed=yes"
686 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
687 pkgconfigcheck = workdir
688 else:
689 pkgconfigcheck = tmpdir
690
691 # find all .la and .pc files
692 # read the content
693 # and check for stuff that looks wrong
694 for root, dirs, files in os.walk(path):
695 for file in files:
696 path = os.path.join(root,file)
697 if file.endswith(".la"):
698 with open(path) as f:
699 file_content = f.read()
700 if workdir in file_content:
701 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
702 sane = package_qa_handle_error("la", error_msg, d)
703 elif file.endswith(".pc"):
704 with open(path) as f:
705 file_content = f.read()
706 if pkgconfigcheck in file_content:
707 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
708 sane = package_qa_handle_error("pkgconfig", error_msg, d)
709
710 return sane
711
712# Walk over all files in each package and run the warn/error check functions
713def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
714 import oe.qa
715
716 # if this throws an exception, fix the dictionary in package_qa_get_machine_dict()
717 target_os = d.getVar('TARGET_OS', True)
718 target_arch = d.getVar('TARGET_ARCH', True)
719
720 warnings = []
721 errors = []
722 for path in pkgfiles[package]:
723 elf = oe.qa.ELFFile(path)
724 try:
725 elf.open()
726 except Exception:
727 elf = None
728 for func in warnfuncs:
729 func(path, package, d, elf, warnings)
730 for func in errorfuncs:
731 func(path, package, d, elf, errors)
732
733 for w in warnings:
734 bb.warn("QA Issue: %s" % w)
735 package_qa_write_error(w, d)
736 for e in errors:
737 bb.error("QA Issue: %s" % e)
738 package_qa_write_error(e, d)
739
740 return len(errors) == 0
741
742def package_qa_check_rdepends(pkg, pkgdest, skip, d):
743 # Don't do this check for kernel/module recipes; there aren't many debug/development
744 # packages, and it can produce false positives, e.g. on kernel-module-lirc-dev
745 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
746 return True
747
748 sane = True
749 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
750 localdata = bb.data.createCopy(d)
751 localdata.setVar('OVERRIDES', pkg)
752 bb.data.update_data(localdata)
753
754 # Now check the RDEPENDS
755 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
756
757 # Now do the sanity check!!!
758 for rdepend in rdepends:
759 if "-dbg" in rdepend and "debug-deps" not in skip:
760 error_msg = "%s rdepends on %s" % (pkg,rdepend)
761 sane = package_qa_handle_error("debug-deps", error_msg, d)
762 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
763 error_msg = "%s rdepends on %s" % (pkg, rdepend)
764 sane = package_qa_handle_error("dev-deps", error_msg, d)
765
766 return sane
767
768def package_qa_check_deps(pkg, pkgdest, skip, d):
769 sane = True
770
771 localdata = bb.data.createCopy(d)
772 localdata.setVar('OVERRIDES', pkg)
773 bb.data.update_data(localdata)
774
775 def check_valid_deps(var):
776 sane = True
777 try:
778 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
779 except ValueError as e:
780 bb.fatal("%s_%s: %s" % (var, pkg, e))
781 for dep in rvar:
782 for v in rvar[dep]:
783 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>= ')):
784 error_msg = "%s_%s is invalid: %s (%s): only the comparisons <, =, >, <= and >= are allowed" % (var, pkg, dep, v)
785 sane = package_qa_handle_error("dep-cmp", error_msg, d)
786 return sane
787
788 sane = True
789 if not check_valid_deps('RDEPENDS'):
790 sane = False
791 if not check_valid_deps('RRECOMMENDS'):
792 sane = False
793 if not check_valid_deps('RSUGGESTS'):
794 sane = False
795 if not check_valid_deps('RPROVIDES'):
796 sane = False
797 if not check_valid_deps('RREPLACES'):
798 sane = False
799 if not check_valid_deps('RCONFLICTS'):
800 sane = False
801
802 return sane
803
804# The PACKAGE FUNC to scan each package
805python do_package_qa () {
806 import subprocess
807
808 bb.note("DO PACKAGE QA")
809
810 logdir = d.getVar('T', True)
811 pkg = d.getVar('PN', True)
812
813 # Check the compile log for host contamination
814 compilelog = os.path.join(logdir,"log.do_compile")
815
816 if os.path.exists(compilelog):
817 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
818 if subprocess.call(statement, shell=True) == 0:
819 msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
820 Please check the log '%s' for more information." % (pkg, compilelog)
821 package_qa_handle_error("compile-host-path", msg, d)
822
823 # Check the install log for host contamination
824 installlog = os.path.join(logdir,"log.do_install")
825
826 if os.path.exists(installlog):
827 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
828 if subprocess.call(statement, shell=True) == 0:
829 msg = "%s: The install log indicates that host include and/or library paths were used.\n \
830 Please check the log '%s' for more information." % (pkg, installlog)
831 package_qa_handle_error("install-host-path", msg, d)
832
833 # Scan the packages...
834 pkgdest = d.getVar('PKGDEST', True)
835 packages = d.getVar('PACKAGES', True)
836
837 # nothing to scan if there are no packages
838 if not packages:
839 return
840
841 testmatrix = d.getVarFlags("QAPATHTEST")
842 import re
843 # Package names must match the [a-z0-9.+-]+ regular expression
844 pkgname_pattern = re.compile("^[a-z0-9.+-]+$")
845
846 g = globals()
847 walk_sane = True
848 rdepends_sane = True
849 deps_sane = True
850 for package in packages.split():
851 skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
852 if skip:
853 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
854 warnchecks = []
855 for w in (d.getVar("WARN_QA", True) or "").split():
856 if w in skip:
857 continue
858 if w in testmatrix and testmatrix[w] in g:
859 warnchecks.append(g[testmatrix[w]])
860 errorchecks = []
861 for e in (d.getVar("ERROR_QA", True) or "").split():
862 if e in skip:
863 continue
864 if e in testmatrix and testmatrix[e] in g:
865 errorchecks.append(g[testmatrix[e]])
866
867 bb.note("Checking Package: %s" % package)
868 # Check package name
869 if not pkgname_pattern.match(package):
870 package_qa_handle_error("pkgname",
871 "%s doesn't match the [a-z0-9.+-]+ regex\n" % package, d)
872
873 path = "%s/%s" % (pkgdest, package)
874 if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
875 walk_sane = False
876 if not package_qa_check_rdepends(package, pkgdest, skip, d):
877 rdepends_sane = False
878 if not package_qa_check_deps(package, pkgdest, skip, d):
879 deps_sane = False
880
881
882 if 'libdir' in d.getVar("ALL_QA", True).split():
883 package_qa_check_libdir(d)
884
885 qa_sane = d.getVar("QA_SANE", True)
886 if not walk_sane or not rdepends_sane or not deps_sane or not qa_sane:
887 bb.fatal("QA run found fatal errors. Please consider fixing them.")
888 bb.note("DONE with PACKAGE QA")
889}
890
891
892python do_qa_staging() {
893 bb.note("QA checking staging")
894
895 if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${STAGING_LIBDIR}'), d):
896 bb.fatal("QA staging was broken by the package built above")
897}
898
899python do_qa_configure() {
900 import subprocess
901
902 ###########################################################################
903 # Check config.log for cross compile issues
904 ###########################################################################
905
906 configs = []
907 workdir = d.getVar('WORKDIR', True)
908 bb.note("Checking autotools environment for common misconfiguration")
909 for root, dirs, files in os.walk(workdir):
910 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \
911 os.path.join(root,"config.log")
912 if "config.log" in files:
913 if subprocess.call(statement, shell=True) == 0:
914 bb.fatal("""This autoconf log indicates errors: it looked at host include and/or library paths while determining system capabilities.
915Rerun the configure task after fixing this. The path was '%s'""" % root)
916
917 if "configure.ac" in files:
918 configs.append(os.path.join(root,"configure.ac"))
919 if "configure.in" in files:
920 configs.append(os.path.join(root, "configure.in"))
921
922 ###########################################################################
923 # Check gettext configuration and dependencies are correct
924 ###########################################################################
925
926 cnf = d.getVar('EXTRA_OECONF', True) or ""
927 if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
928 ml = d.getVar("MLPREFIX", True) or ""
929 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
930 gt = "gettext-native"
931 elif bb.data.inherits_class('cross-canadian', d):
932 gt = "nativesdk-gettext"
933 else:
934 gt = "virtual/" + ml + "gettext"
935 deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
936 if gt not in deps:
937 for config in configs:
938 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
939 if subprocess.call(gnu, shell=True) == 0:
940 bb.fatal("""%s required but not in DEPENDS for file %s.
941Missing inherit gettext?""" % (gt, config))
942
943 ###########################################################################
944 # Check license variables
945 ###########################################################################
946
947 if not package_qa_check_license(workdir, d):
948 bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
949
950 ###########################################################################
951 # Check unrecognised configure options (with a whitelist)
952 ###########################################################################
953 if bb.data.inherits_class("autotools", d):
954 bb.note("Checking configure output for unrecognised options")
955 try:
956 flag = "WARNING: unrecognized options:"
957 log = os.path.join(d.getVar('B', True), 'config.log')
958 output = subprocess.check_output(['grep', '-F', flag, log]).replace(', ', ' ')
959 options = set()
960 for line in output.splitlines():
961 options |= set(line.partition(flag)[2].split())
962 whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST", True).split())
963 options -= whitelist
964 if options:
965 pn = d.getVar('PN', True)
966 error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
967 package_qa_handle_error("unknown-configure-option", error_msg, d)
968 except subprocess.CalledProcessError:
969 pass
970}
971# The Staging Func, to check all staging
972#addtask qa_staging after do_populate_sysroot before do_build
973do_populate_sysroot[postfuncs] += "do_qa_staging "
974
975# Check broken config.log files, for packages requiring Gettext which don't
976# have it in DEPENDS and for correct LIC_FILES_CHKSUM
977#addtask qa_configure after do_configure before do_compile
978do_configure[postfuncs] += "do_qa_configure "
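# Individual QA tests can be skipped per package from a recipe, e.g.
# (illustrative): INSANE_SKIP_${PN} += "dev-so"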
979
980python () {
981 tests = d.getVar('ALL_QA', True).split()
982 if "desktop" in tests:
983 d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native")
984
985 ###########################################################################
986 # Check various variables
987 ###########################################################################
988
989 if d.getVar('do_stage', True) is not None:
990 bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True))
991
992 overrides = d.getVar('OVERRIDES', True).split(':')
993 pn = d.getVar('PN', True)
994 if pn in overrides:
995 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn)
996 package_qa_handle_error("pn-overrides", msg, d)
997
998 issues = []
999 if (d.getVar('PACKAGES', True) or "").split():
1000 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
1001 if d.getVar(var):
1002 issues.append(var)
1003 for i in issues:
1004 package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
1005}
diff --git a/meta/classes/insserv.bbclass b/meta/classes/insserv.bbclass
new file mode 100644
index 0000000000..14290a77e2
--- /dev/null
+++ b/meta/classes/insserv.bbclass
@@ -0,0 +1,5 @@
1do_rootfs[depends] += "insserv-native:do_populate_sysroot"
2run_insserv () {
3 insserv -p ${IMAGE_ROOTFS}/etc/init.d -c ${STAGING_ETCDIR_NATIVE}/insserv.conf
4}
5ROOTFS_POSTPROCESS_COMMAND += " run_insserv ; "
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
new file mode 100644
index 0000000000..bbcfa15b84
--- /dev/null
+++ b/meta/classes/kernel-arch.bbclass
@@ -0,0 +1,60 @@
1#
2# Set the ARCH environment variable for kernel compilation (including
3# modules). The return value must match one of the architecture directories
4# in the kernel source "arch" directory.
5#
6
7valid_archs = "alpha cris ia64 \
8 i386 x86 \
9 m68knommu m68k ppc powerpc powerpc64 ppc64 \
10 sparc sparc64 \
11 arm aarch64 \
12 m32r mips \
13 sh sh64 um h8300 \
14 parisc s390 v850 \
15 avr32 blackfin \
16 microblaze"
17
18def map_kernel_arch(a, d):
19 import re
20
21 valid_archs = d.getVar('valid_archs', True).split()
22
23 if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
24 elif re.match('armeb$', a): return 'arm'
25 elif re.match('aarch64$', a): return 'arm64'
26 elif re.match('aarch64_be$', a): return 'arm64'
27 elif re.match('mips(el|64|64el)$', a): return 'mips'
28 elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
29 elif re.match('sh(3|4)$', a): return 'sh'
30 elif re.match('bfin', a): return 'blackfin'
31 elif re.match('microblazeel', a): return 'microblaze'
32 elif a in valid_archs: return a
33 else:
34 bb.error("cannot map '%s' to a Linux kernel architecture" % a)
35
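# For example, map_kernel_arch("x86_64", d) returns "x86" and
# map_kernel_arch("powerpc64", d) returns "powerpc"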
36export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
37
38def map_uboot_arch(a, d):
39 import re
40
41 if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
42 elif re.match('i.86$', a): return 'x86'
43 elif re.match('arm64$', a): return 'arm'
44 return a
45
46export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
47
48# Set TARGET_??_KERNEL_ARCH in the machine .conf to pass architecture-
49# specific options necessary for building the kernel and modules.
50TARGET_CC_KERNEL_ARCH ?= ""
51HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
52TARGET_LD_KERNEL_ARCH ?= ""
53HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
54TARGET_AR_KERNEL_ARCH ?= ""
55HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
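# e.g. an ARM machine .conf might set (illustrative):
# TARGET_CC_KERNEL_ARCH = "-mno-thumb-interwork -marm"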
56
57KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH}"
58KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
59KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
60
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
new file mode 100644
index 0000000000..85721ffd70
--- /dev/null
+++ b/meta/classes/kernel-grub.bbclass
@@ -0,0 +1,91 @@
1#
2# While installing an rpm to update the kernel on a deployed target, this
3# updates the boot area and the boot menu with the new kernel as the default,
4# but still allows you to fall back to the original kernel.
5#
6# - In kernel-image's preinstall scriptlet, it backs up the original kernel to
7# avoid a probable conflict with the new one.
8#
9# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
10# set the new kernel as the boot priority.
11#
12
13pkg_preinst_kernel-image_append () {
14 # Find the grub config file and handle kernel naming conflicts
15 [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
16 [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
17 if [ -n "$grubcfg" ]; then
18 # Dereference the symlink to avoid a conflict with the new kernel name.
19 if grep -q "/${KERNEL_IMAGETYPE} \+root=" $grubcfg; then
20 if [ -L "$D/boot/${KERNEL_IMAGETYPE}" ]; then
21 kimage=`realpath $D/boot/${KERNEL_IMAGETYPE} 2>/dev/null`
22 if [ -f "$D$kimage" ]; then
23 sed -i "s:${KERNEL_IMAGETYPE} \+root=:${kimage##*/} root=:" $grubcfg
24 fi
25 fi
26 fi
27
28 # Rename old kernel if it conflicts with new kernel name.
29 if grep -q "/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=" $grubcfg; then
30 if [ -f "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" ]; then
31 timestamp=`date +%s`
32 kimage="$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-$timestamp-back"
33 sed -i "s:${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
34 mv "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" "$kimage"
35 fi
36 fi
37 fi
38}
39
40pkg_postinst_kernel-image_prepend () {
41 get_new_grub_cfg() {
42 grubcfg="$1"
43 old_image="$2"
44 title="Update ${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-${PV}"
45 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
46 rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
47 sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"`
48
49 echo "menuentry \"$title\" {"
50 echo " set root=(hd0,1)"
51 echo "$rootfs"
52 echo "}"
53 elif [ "${grubcfg##*/}" = "menu.list" ]; then
54 rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
55 sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"`
56
57 echo "default 0"
58 echo "timeout 30"
59 echo "title $title"
60 echo "root (hd0,0)"
61 echo "$rootfs"
62 fi
63 }
64
65 get_old_grub_cfg() {
66 grubcfg="$1"
67 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
68 cat "$grubcfg"
69 elif [ "${grubcfg##*/}" = "menu.list" ]; then
70 cat "$grubcfg" | sed -e '/^default/d' -e '/^timeout/d'
71 fi
72 }
73
74 if [ -f "$D/boot/grub/grub.cfg" ]; then
75 grubcfg="$D/boot/grub/grub.cfg"
76 old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
77 elif [ -f "$D/boot/grub/menu.list" ]; then
78 grubcfg="$D/boot/grub/menu.list"
79 old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
80 fi
81
82 # Don't update grubcfg on first install, when the old kernel image doesn't exist yet.
83 if [ -f "$D/boot/${old_image##*/}" ]; then
84 grubcfgtmp="$grubcfg.tmp"
85 get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
86 get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
87 mv $grubcfgtmp $grubcfg
88 echo "Caution! Update kernel may affect kernel-module!"
89 fi
90}
91
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
new file mode 100644
index 0000000000..d43f7431cb
--- /dev/null
+++ b/meta/classes/kernel-module-split.bbclass
@@ -0,0 +1,187 @@
1pkg_postinst_modules () {
2if [ -z "$D" ]; then
3 depmod -a ${KERNEL_VERSION}
4else
5 # image.bbclass will call depmodwrapper after everything is installed,
6 # no need to do it here as well
7 :
8fi
9}
10
11pkg_postrm_modules () {
12if [ -z "$D" ]; then
13 depmod -a ${KERNEL_VERSION}
14else
15 depmodwrapper -a -b $D ${KERNEL_VERSION}
16fi
17}
18
19autoload_postinst_fragment() {
20if [ x"$D" = "x" ]; then
21 modprobe %s || true
22fi
23}
24
25do_install_append() {
26 install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
27}
28
29PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
30
31KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
32
33python split_kernel_module_packages () {
34 import re
35
36 modinfoexp = re.compile("([^=]+)=(.*)")
37 kerverrexp = re.compile(r'^(.*-hh.*)[\.\+].*$')
38 depmodpat0 = re.compile(r"^(.*\.k?o):..*$")
39 depmodpat1 = re.compile(r"^(.*\.k?o):\s*(.*\.k?o)\s*$")
40 depmodpat2 = re.compile(r"^(.*\.k?o):\s*(.*\.k?o)\s*\\$")
41 depmodpat3 = re.compile(r"^\t(.*\.k?o)\s*\\$")
42 depmodpat4 = re.compile(r"^\t(.*\.k?o)\s*$")
43
44 def extract_modinfo(file):
45 import tempfile, subprocess
46 tempfile.tempdir = d.getVar("WORKDIR", True)
47 tf = tempfile.mkstemp()
48 tmpfile = tf[1]
49 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
50 subprocess.call(cmd, shell=True)
51 f = open(tmpfile)
52 l = f.read().split("\000")
53 f.close()
54 os.close(tf[0])
55 os.unlink(tmpfile)
56 vals = {}
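        # each element of l is a NUL-separated "key=value" string from the
        # .modinfo section, e.g. "license=GPL" or "depends=..." (illustrative)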
57 for i in l:
58 m = modinfoexp.match(i)
59 if not m:
60 continue
61 vals[m.group(1)] = m.group(2)
62 return vals
63
64 def parse_depmod():
65
66 dvar = d.getVar('PKGD', True)
67
68 kernelver = d.getVar('KERNEL_VERSION', True)
69 kernelver_stripped = kernelver
70 m = kerverrexp.match(kernelver)
71 if m:
72 kernelver_stripped = m.group(1)
73 staging_kernel_dir = d.getVar("STAGING_KERNEL_DIR", True)
74 system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
75 if not os.path.exists(system_map_file):
76 system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
77 if not os.path.exists(system_map_file):
78 bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_DIR '%s'" % (kernelver, dvar, staging_kernel_dir))
79
80 cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
81 f = os.popen(cmd, 'r')
82
83 deps = {}
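        # depmod -n emits a Makefile-style dependency listing, e.g. (illustrative):
        #   kernel/fs/foo.ko: kernel/lib/bar.ko kernel/lib/baz.ko
        # with long lines continued by a trailing backslash; the depmodpat*
        # expressions above take this format apart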
84 line = f.readline()
85 while line:
86 if not depmodpat0.match(line):
87 line = f.readline()
88 continue
89 m1 = depmodpat1.match(line)
90 if m1:
91 deps[m1.group(1)] = m1.group(2).split()
92 else:
93 m2 = depmodpat2.match(line)
94 if m2:
95 deps[m2.group(1)] = m2.group(2).split()
96 line = f.readline()
97 m3 = depmodpat3.match(line)
98 while m3:
99 deps[m2.group(1)].extend(m3.group(1).split())
100 line = f.readline()
101 m3 = depmodpat3.match(line)
102 m4 = depmodpat4.match(line)
103 if m4: deps[m2.group(1)].extend(m4.group(1).split())
104 line = f.readline()
105 f.close()
106 return deps
107
108 def get_dependencies(file, pattern, format):
109 # file no longer includes PKGD
110 file = file.replace(d.getVar('PKGD', True) or '', '', 1)
111 # instead is prefixed with /lib/modules/${KERNEL_VERSION}
112 file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', True) or '', '', 1)
113
114 if file in module_deps:
115 dependencies = []
116 for i in module_deps[file]:
117 m = re.match(pattern, os.path.basename(i))
118 if not m:
119 continue
120 on = legitimize_package_name(m.group(1))
121 dependency_pkg = format % on
122 dependencies.append(dependency_pkg)
123 return dependencies
124 return []
125
126 def frob_metadata(file, pkg, pattern, format, basename):
127 vals = extract_modinfo(file)
128
129 dvar = d.getVar('PKGD', True)
130
131 # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
132 # appropriate modprobe commands to the postinst
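    # e.g. a recipe setting module_autoload_foo = "foo" (illustrative) gets
    # /etc/modules-load.d/foo.conf generated with the content "foo"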
133 autoload = d.getVar('module_autoload_%s' % basename, True)
134 if autoload:
135 name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
136 f = open(name, 'w')
137 for m in autoload.split():
138 f.write('%s\n' % m)
139 f.close()
140 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
141 if not postinst:
142 bb.fatal("pkg_postinst_%s not defined" % pkg)
143 postinst += d.getVar('autoload_postinst_fragment', True) % autoload
144 d.setVar('pkg_postinst_%s' % pkg, postinst)
145
146 # Write out any modconf fragment
147 modconf = d.getVar('module_conf_%s' % basename, True)
148 if modconf:
149 name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
150 f = open(name, 'w')
151 f.write("%s\n" % modconf)
152 f.close()
153
154 files = d.getVar('FILES_%s' % pkg, True)
155 files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
156 d.setVar('FILES_%s' % pkg, files)
157
158 if "description" in vals:
159 old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
160 d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
161
162 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
163 for dep in get_dependencies(file, pattern, format):
164 if not dep in rdepends:
165 rdepends[dep] = []
166 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
167
168 module_deps = parse_depmod()
169 module_regex = r'^(.*)\.k?o$'
170 module_pattern = 'kernel-module-%s'
171
172 postinst = d.getVar('pkg_postinst_modules', True)
173 postrm = d.getVar('pkg_postrm_modules', True)
174
175 modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True)))
176 if modules:
177 metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True)
178 d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
179
180 # If modules-load.d and modprobe.d are empty at this point, remove them to
181 # avoid warnings; each directory is only removed once os.listdir() shows
182 # that it is empty.
183 dvar = d.getVar('PKGD', True)
184 for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
185 if len(os.listdir(dir)) == 0:
186 os.rmdir(dir)
187}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
new file mode 100644
index 0000000000..53bc6d443c
--- /dev/null
+++ b/meta/classes/kernel-yocto.bbclass
@@ -0,0 +1,416 @@
1S = "${WORKDIR}/linux"
2
3# remove tasks that modify the source tree in case externalsrc is inherited
4SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch"
5
6# returns local (absolute) path names for all valid patches in the
7# SRC_URI
8def find_patches(d):
9 patches = src_patches(d)
10 patch_list=[]
11 for p in patches:
12 _, _, local, _, _, _ = bb.fetch.decodeurl(p)
13 patch_list.append(local)
14
15 return patch_list
16
17# returns all the elements from the SRC_URI that are .scc/.cfg files, plus any defconfig
18def find_sccs(d):
19 sources=src_patches(d, True)
20 sources_list=[]
21 for s in sources:
22 base, ext = os.path.splitext(os.path.basename(s))
23 if ext and ext in [".scc", ".cfg"]:
24 sources_list.append(s)
25 elif base == 'defconfig':
26 sources_list.append(s)
27
28 return sources_list
29
30# check the SRC_URI for "kmeta"-typed git repositories. Return the name of
31# the repository as it will be found in WORKDIR
32def find_kernel_feature_dirs(d):
33 feature_dirs=[]
34 fetch = bb.fetch2.Fetch([], d)
35 for url in fetch.urls:
36 urldata = fetch.ud[url]
37 parm = urldata.parm
38 if "type" in parm:
39 type = parm["type"]
40 if "destsuffix" in parm:
41 destdir = parm["destsuffix"]
42 if type == "kmeta":
43 feature_dirs.append(destdir)
44
45 return feature_dirs
46
47# find the master/machine source branch. In the same way that the fetcher processes
48# git repositories in the SRC_URI we take the first repo found, first branch.
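# e.g. a SRC_URI entry carrying branch=standard/base,standard/common-pc
# (illustrative) yields "standard/base"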
49def get_machine_branch(d, default):
50 fetch = bb.fetch2.Fetch([], d)
51 for url in fetch.urls:
52 urldata = fetch.ud[url]
53 parm = urldata.parm
54 if "branch" in parm:
55 branches = urldata.parm.get("branch").split(',')
56 return branches[0]
57
58 return default
59
60do_patch() {
61 cd ${S}
62 export KMETA=${KMETA}
63
64 # if kernel tools are available in-tree, they are preferred
65 # and are placed on the path before any external tools, unless
66 # the external tools flag is set, in which case we do nothing.
67 if [ -f "${S}/scripts/util/configme" ]; then
68 if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
69 PATH=${S}/scripts/util:${PATH}
70 fi
71 fi
72
73 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
74
75 # if we have a defined/set meta branch we should not be generating
76 # any metadata. The passed branch has what we need.
77 if [ -n "${KMETA}" ]; then
78 createme_flags="--disable-meta-gen --meta ${KMETA}"
79 fi
80
81 createme ${createme_flags} ${ARCH} ${machine_branch}
82 if [ $? -ne 0 ]; then
83 echo "ERROR. Could not create ${machine_branch}"
84 exit 1
85 fi
86
87 sccs="${@" ".join(find_sccs(d))}"
88 patches="${@" ".join(find_patches(d))}"
89 feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
90
91 set +e
92 # add any explicitly referenced features onto the end of the feature
93 # list that is passed to the kernel build scripts.
94 if [ -n "${KERNEL_FEATURES}" ]; then
95 for feat in ${KERNEL_FEATURES}; do
96 addon_features="$addon_features --feature $feat"
97 done
98 fi
99
100 # check for feature directories/repos/branches that were part of the
101 # SRC_URI. If they were supplied, we convert them into include directives
102 # for the update part of the process
103 if [ -n "${feat_dirs}" ]; then
104 for f in ${feat_dirs}; do
105 if [ -d "${WORKDIR}/$f/meta" ]; then
106 includes="$includes -I${WORKDIR}/$f/meta"
107 elif [ -d "${WORKDIR}/$f" ]; then
108 includes="$includes -I${WORKDIR}/$f"
109 fi
110 done
111 fi
112
113 if [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ]; then
114 updateme_flags="--branch ${machine_branch}"
115 fi
116
117 # updates or generates the target description
118 updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
119 ${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
120 if [ $? -ne 0 ]; then
121 echo "ERROR. Could not update ${machine_branch}"
122 exit 1
123 fi
124
125 # executes and modifies the source tree as required
126 patchme ${KMACHINE}
127 if [ $? -ne 0 ]; then
128 echo "ERROR. Could not apply patches for ${KMACHINE}."
129 echo " Patch failures can be resolved in the devshell (bitbake -c devshell ${PN})"
130 exit 1
131 fi
132
133 # Perform a final check. If something other than the default kernel
134 # branch was requested, and that's not where we ended up, then we
135 # should throw an error, since we aren't building what was expected
136 final_branch="$(git symbolic-ref HEAD 2>/dev/null)"
137 final_branch=${final_branch##refs/heads/}
138 if [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ] &&
139 [ "${final_branch}" != "${machine_branch}" ]; then
140 echo "ERROR: branch ${machine_branch} was requested, but was not properly"
141 echo " configured to be built. The current branch is ${final_branch}"
142 exit 1
143 fi
144}
145
146do_kernel_checkout() {
147 set +e
148
149 # A linux yocto SRC_URI should use the bareclone option. That
150 # ensures that all the branches are available in the WORKDIR version
151 # of the repository.
152 source_dir=`echo ${S} | sed 's%/$%%'`
153 source_workdir="${WORKDIR}/git"
154 if [ -d "${WORKDIR}/git/" ] && [ -d "${WORKDIR}/git/.git" ]; then
155 # case 1: the repository is a non-bare clone
156
157 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
158 if [ "${source_dir}" != "${source_workdir}" ]; then
159 rm -rf ${S}
160 mv ${WORKDIR}/git ${S}
161 fi
162 cd ${S}
163 elif [ -d "${WORKDIR}/git/" ] && [ ! -d "${WORKDIR}/git/.git" ]; then
164 # case 2: the repository is a bare clone
165
166 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
167 if [ "${source_dir}" != "${source_workdir}" ]; then
168 rm -rf ${S}
169 mkdir -p ${S}/.git
170 mv ${WORKDIR}/git/* ${S}/.git
171 rm -rf ${WORKDIR}/git/
172 fi
173 cd ${S}
174 git config core.bare false
175 else
176 # case 3: we have no git repository at all.
177 # To support low bandwidth options for building the kernel, we'll just
178 # convert the tree to a git repo and let the rest of the process work unchanged
179
180 # if ${S} hasn't been set to the proper subdirectory a default of "linux" is
181 # used, but we can't initialize that empty directory. So check it and throw a
182 # clear error
183
184 cd ${S}
185 if [ ! -f "Makefile" ]; then
186 echo "[ERROR]: S is not set to the linux source directory. Check "
187 echo " the recipe and set S to the proper extracted subdirectory"
188 exit 1
189 fi
190 git init
191 git add .
192 git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
193 fi
194 # end of bare-clone handling
195
196 # If KMETA is defined, the branch must exist, but a machine branch
197 # can be missing since it may be created later by the tools.
198 if [ -n "${KMETA}" ]; then
199 git branch -a --no-color | grep -q ${KMETA}
200 if [ $? -ne 0 ]; then
201 echo "ERROR. The branch '${KMETA}' is required and was not"
202 echo "found. Ensure that the SRC_URI points to a valid linux-yocto"
203 echo "kernel repository"
204 exit 1
205 fi
206 fi
207
208 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
209
210 if [ "${KBRANCH}" != "${machine_branch}" ]; then
211 echo "WARNING: The SRC_URI machine branch and KBRANCH are not the same."
212 echo " KBRANCH will be adjusted to match, but this typically is a"
213 echo " misconfiguration and should be checked."
214 fi
215
216 # convert any remote branches to local tracking ones
217 for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
218 b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
219 git show-ref --quiet --verify -- "refs/heads/$b"
220 if [ $? -ne 0 ]; then
221 git branch $b $i > /dev/null
222 fi
223 done
224
225 # Create a working tree copy of the kernel by checking out a branch
226 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
227 if [ $? -eq 0 ]; then
228 # checkout and clobber any unimportant files
229 git checkout -f ${machine_branch}
230 else
231 echo "Not checking out ${machine_branch}, it will be created later"
232 git checkout -f master
233 fi
234}
235do_kernel_checkout[dirs] = "${S}"
236
237addtask kernel_checkout before do_patch after do_unpack
238
239do_kernel_configme[dirs] += "${S} ${B}"
240do_kernel_configme() {
241 echo "[INFO] doing kernel configme"
242 export KMETA=${KMETA}
243
244 if [ -n "${KCONFIG_MODE}" ]; then
245 configmeflags=${KCONFIG_MODE}
246 else
247 # If a defconfig was passed, use =n as the baseline, which is achieved
248 # via --allnoconfig
249 if [ -f ${WORKDIR}/defconfig ]; then
250 configmeflags="--allnoconfig"
251 fi
252 fi
253
254 cd ${S}
255 PATH=${PATH}:${S}/scripts/util
256 configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
257 if [ $? -ne 0 ]; then
258 echo "ERROR. Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
259 exit 1
260 fi
261
262 echo "# Global settings from linux recipe" >> ${B}/.config
263 echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
264}
265
266addtask kernel_configme after do_patch
267
268python do_kernel_configcheck() {
269 import re, string, sys
270
271 bb.plain("NOTE: validating kernel config, see log.do_kernel_configcheck for details")
272
273 # if KMETA isn't set globally by a recipe using this routine, we need to
274 # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
275 # meta-series for processing
276 kmeta = d.getVar("KMETA", True) or "meta"
277 if not os.path.exists(kmeta):
278 kmeta = "." + kmeta
279
280 pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
281 cmd = d.expand("cd ${S}; kconf_check -config- %s/meta-series ${S} ${B}" % kmeta)
282 ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
283
284 config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL", True) or 1)
285 if config_check_visibility == 1:
286 bb.debug(1, "%s" % result)
287 else:
288 bb.note("%s" % result)
289}
290
291# Ensure that the branches (BSP and meta) are on the locations specified by
292# their SRCREV values. If they are NOT on the right commits, the branches
293# are corrected to the proper commit.
294do_validate_branches() {
295 cd ${S}
296 export KMETA=${KMETA}
297
298 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
299
300 set +e
301 # if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to
302 # check and we can exit early
303 if [ "${SRCREV_machine}" = "AUTOINC" ] || [ "${SRCREV_machine}" = "INVALID" ] ||
304 [ "${SRCREV_machine}" = "" ]; then
305 return
306 fi
307
308 # If something other than the default branch was requested, it must
309 # exist in the tree, and it's a hard error if it doesn't
310 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
311 if [ $? -eq 1 ]; then
312 if [ -n "${KBRANCH_DEFAULT}" ] &&
313 [ "${machine_branch}" != "${KBRANCH_DEFAULT}" ]; then
314 echo "ERROR: branch ${machine_branch} was set for kernel compilation, "
315 echo " but it does not exist in the kernel repository."
316 echo " Check the value of KBRANCH and ensure that it describes"
317 echo "      a valid branch in the source kernel repository"
318 exit 1
319 fi
320 fi
321
322 if [ -z "${SRCREV_machine}" ]; then
323 target_branch_head="${SRCREV}"
324 else
325 target_branch_head="${SRCREV_machine}"
326 fi
327
328 # $SRCREV could have also been AUTOINC, so check again
329 if [ "${target_branch_head}" = "AUTOINC" ]; then
330 return
331 fi
332
333 ref=`git show ${target_branch_head} 2>&1 | head -n1 || true`
334 if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
335 echo "ERROR ${target_branch_head} is not a valid commit ID."
336 echo "The kernel source tree may be out of sync"
337 exit 1
338 fi
339
340 containing_branches=`git branch --contains $target_branch_head | sed 's/^..//'`
341 if [ -z "$containing_branches" ]; then
342 echo "ERROR: SRCREV was set to \"$target_branch_head\", but no branches"
343 echo " contain this commit"
344 exit 1
345 fi
346
347 # force the SRCREV in each branch that contains the specified
348 # SRCREV (if it isn't the current HEAD of that branch)
349 git checkout -q master
350 for b in $containing_branches; do
351 branch_head=`git show-ref -s --heads ${b}`
352 if [ "$branch_head" != "$target_branch_head" ]; then
353 echo "[INFO] Setting branch $b to ${target_branch_head}"
354 if [ "$b" = "master" ]; then
355 git reset --hard $target_branch_head > /dev/null
356 else
357 git branch -D $b > /dev/null
358 git branch $b $target_branch_head > /dev/null
359 fi
360 fi
361 done
362
363 ## KMETA branch validation.
364 ## We do validation if the meta branch exists, and AUTOREV hasn't been set
365 meta_head=`git show-ref -s --heads ${KMETA}`
366 target_meta_head="${SRCREV_meta}"
367 git show-ref --quiet --verify -- "refs/heads/${KMETA}"
368 if [ $? -eq 0 ] && [ "${target_meta_head}" != "AUTOINC" ]; then
369 if [ "$meta_head" != "$target_meta_head" ]; then
370 ref=`git show ${target_meta_head} 2>&1 | head -n1 || true`
371 if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
372 echo "ERROR ${target_meta_head} is not a valid commit ID"
373 echo "The kernel source tree may be out of sync"
374 exit 1
375 else
376 echo "[INFO] Setting branch ${KMETA} to ${target_meta_head}"
377 git branch -m ${KMETA} ${KMETA}-orig
378 git checkout -q -b ${KMETA} ${target_meta_head}
379 if [ $? -ne 0 ];then
380 echo "ERROR: could not checkout ${KMETA} branch from known hash ${target_meta_head}"
381 exit 1
382 fi
383 fi
384 fi
385 fi
386
387 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
388 if [ $? -eq 0 ]; then
389 # restore the branch for builds
390 git checkout -q -f ${machine_branch}
391 else
392 git checkout -q master
393 fi
394}
395
396# Many scripts want to look in arch/$arch/boot for the bootable
397# image. This poses a problem for vmlinux-based booting. This
398# task arranges to have vmlinux appear in the normalized directory
399# location.
400do_kernel_link_vmlinux() {
401 if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
402 mkdir ${B}/arch/${ARCH}/boot
403 fi
404 cd ${B}/arch/${ARCH}/boot
405 ln -sf ../../../vmlinux
406}
407
408OE_TERMINAL_EXPORTS += "GUILT_BASE KBUILD_OUTPUT"
409GUILT_BASE = "meta"
410KBUILD_OUTPUT = "${B}"
411
412python () {
413 # If diffconfig is available, ensure it runs after kernel_configme
414 if 'do_diffconfig' in d:
415 bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
416}
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
new file mode 100644
index 0000000000..19b159b492
--- /dev/null
+++ b/meta/classes/kernel.bbclass
@@ -0,0 +1,502 @@
1inherit linux-kernel-base kernel-module-split
2
3PROVIDES += "virtual/kernel"
4DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross"
5
6# we include gcc above, we don't need virtual/libc
7INHIBIT_DEFAULT_DEPS = "1"
8
9KERNEL_IMAGETYPE ?= "zImage"
10INITRAMFS_IMAGE ?= ""
11INITRAMFS_TASK ?= ""
12INITRAMFS_IMAGE_BUNDLE ?= ""
13
14python __anonymous () {
15 kerneltype = d.getVar('KERNEL_IMAGETYPE', True) or ''
16 if kerneltype == 'uImage':
17 depends = d.getVar("DEPENDS", True)
18 depends = "%s u-boot-mkimage-native" % depends
19 d.setVar("DEPENDS", depends)
20
21 image = d.getVar('INITRAMFS_IMAGE', True)
22 if image:
23 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_rootfs')
24
25 # NOTE: setting INITRAMFS_TASK is for backward compatibility
26 # The preferred method is to set INITRAMFS_IMAGE, because
27 # this INITRAMFS_TASK has circular dependency problems
28 # if the initramfs requires kernel modules
29 image_task = d.getVar('INITRAMFS_TASK', True)
30 if image_task:
31 d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
32}
33
34inherit kernel-arch deploy
35
36PACKAGES_DYNAMIC += "^kernel-module-.*"
37PACKAGES_DYNAMIC += "^kernel-image-.*"
38PACKAGES_DYNAMIC += "^kernel-firmware-.*"
39
40export OS = "${TARGET_OS}"
41export CROSS_COMPILE = "${TARGET_PREFIX}"
42
43KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
44 int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
45 int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}"
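# e.g. PV = "3.14.2" gives 3*10000 + 14*100 + 2 = 31402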
46
47KERNEL_RELEASE ?= "${KERNEL_VERSION}"
48
49# Where the built kernel image lives in the kernel tree
50KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
51KERNEL_IMAGEDEST = "boot"
52
53#
54# configuration
55#
56export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
57
58KERNEL_VERSION = "${@get_kernelversion('${B}')}"
59
60KERNEL_LOCALVERSION ?= ""
61
62# kernels are generally machine specific
63PACKAGE_ARCH = "${MACHINE_ARCH}"
64
65# U-Boot support
66UBOOT_ENTRYPOINT ?= "20008000"
67UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
68
69# Some Linux kernel configurations need additional parameters on the command line
70KERNEL_EXTRA_ARGS ?= ""
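# Illustrative example: a machine config might pass a load address to an ARM
# uImage build this way (the value below is made up for illustration):
#   KERNEL_EXTRA_ARGS = "LOADADDR=0x80008000"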
71
72# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
73# We don't want to override kernel Makefile variables from the environment
74EXTRA_OEMAKE = ""
75
76KERNEL_ALT_IMAGETYPE ??= ""
77
78# Define where the kernel headers are installed on the target as well as where
79# they are staged.
80KERNEL_SRC_PATH = "/usr/src/kernel"
81
82KERNEL_IMAGETYPE_FOR_MAKE = "${@(lambda s: s[:-3] if s[-3:] == ".gz" else s)(d.getVar('KERNEL_IMAGETYPE', True))}"
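# e.g. KERNEL_IMAGETYPE = "vmlinux.gz" turns into the make target "vmlinux";
# kernel_do_compile below then gzips the result into ${KERNEL_OUTPUT}.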
83
84copy_initramfs() {
85 echo "Copying initramfs into ./usr ..."
86 # In case the directory is not created yet from the first pass compile:
87 mkdir -p ${B}/usr
88 # Find and use the first initramfs image archive type we find
89 rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
90 for img in cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
91 if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
92 cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/.
93 case $img in
94 *gz)
95 echo "gzip decompressing image"
96 gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
97 break
98 ;;
99 *lz4)
100 echo "lz4 decompressing image"
101 lz4 -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
102 break
103 ;;
104 *lzo)
105 echo "lzo decompressing image"
106 lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
107 break
108 ;;
109 *lzma)
110 echo "lzma decompressing image"
111 lzma -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
112 break
113 ;;
114 *xz)
115 echo "xz decompressing image"
116 xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
117 break
118 ;;
119 esac
120 fi
121 done
122 echo "Finished copy of initramfs into ./usr"
123}
124
125INITRAMFS_BASE_NAME = "${KERNEL_IMAGETYPE}-initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
126INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
127do_bundle_initramfs () {
128 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
129 echo "Creating a kernel image with a bundled initramfs..."
130 copy_initramfs
131 if [ -e ${KERNEL_OUTPUT} ] ; then
132 mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.bak
133 fi
134 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
135 kernel_do_compile
136 mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.initramfs
137 mv -f ${KERNEL_OUTPUT}.bak ${KERNEL_OUTPUT}
138 # Update install area
139		echo "There is a kernel image bundled with an initramfs: ${B}/${KERNEL_OUTPUT}.initramfs"
140 install -m 0644 ${B}/${KERNEL_OUTPUT}.initramfs ${D}/boot/${KERNEL_IMAGETYPE}-initramfs-${MACHINE}.bin
141 echo "${B}/${KERNEL_OUTPUT}.initramfs"
142 fi
143}
144
145python do_devshell_prepend () {
146 os.environ["LDFLAGS"] = ''
147}
148
149addtask bundle_initramfs after do_install before do_deploy
150
151kernel_do_compile() {
152 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
153 # The $use_alternate_initrd is only set from
154	# do_bundle_initramfs(). This variable is specifically for the
155 # case where we are making a second pass at the kernel
156 # compilation and we want to force the kernel build to use a
157 # different initramfs image. The way to do that in the kernel
158 # is to specify:
159 # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
160 if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
161		# The old style way of copying a prebuilt image and building it
162		# is turned on via INITRAMFS_TASK != ""
163 copy_initramfs
164 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
165 fi
166 oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
167 if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then
168 gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}"
169 fi
170}
171
172do_compile_kernelmodules() {
173 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
174 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
175 oe_runmake ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
176 else
177 bbnote "no modules to compile"
178 fi
179}
180addtask compile_kernelmodules after do_compile before do_strip
181
182kernel_do_install() {
183 #
184 # First install the modules
185 #
186 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
187 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
188 oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
189 rm "${D}/lib/modules/${KERNEL_VERSION}/build"
190 rm "${D}/lib/modules/${KERNEL_VERSION}/source"
191 # If the kernel/ directory is empty remove it to prevent QA issues
192 rmdir --ignore-fail-on-non-empty "${D}/lib/modules/${KERNEL_VERSION}/kernel"
193 else
194 bbnote "no modules to install"
195 fi
196
197 #
198 # Install various kernel output (zImage, map file, config, module support files)
199 #
200 install -d ${D}/${KERNEL_IMAGEDEST}
201 install -d ${D}/boot
202 install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}
203 install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
204 install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
205 install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
206 [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
207 install -d ${D}${sysconfdir}/modules-load.d
208 install -d ${D}${sysconfdir}/modprobe.d
209
210 #
211 # Support for external module building - create a minimal copy of the
212 # kernel source tree.
213 #
214 kerneldir=${D}${KERNEL_SRC_PATH}
215 install -d $kerneldir
216
217 #
218 # Store the kernel version in sysroots for module-base.bbclass
219 #
220
221 echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
222
223 #
224 # Store kernel image name to allow use during image generation
225 #
226
227 echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
228
229 #
230 # Copy the entire source tree. In case an external build directory is
231 # used, copy the build directory over first, then copy over the source
232 # dir. This ensures the original Makefiles are used and not the
233 # redirecting Makefiles in the build directory.
234 #
235 find . -depth -not -name "*.cmd" -not -name "*.o" -not -path "./Documentation*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
236 cp .config $kerneldir
237 if [ "${S}" != "${B}" ]; then
238 pwd="$PWD"
239 cd "${S}"
240 find . -depth -not -path "./Documentation*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
241 cd "$pwd"
242 fi
243
244 # Test to ensure that the output file and image type are not actually
245 # the same file. If hardlinking is used, they will be the same, and there's
246 # no need to install.
247 ! [ ${KERNEL_OUTPUT} -ef $kerneldir/${KERNEL_IMAGETYPE} ] && install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
248 install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
249
250 # Dummy Makefile so the clean below works
251 mkdir $kerneldir/Documentation
252 touch $kerneldir/Documentation/Makefile
253
254 #
255 # Clean and remove files not needed for building modules.
256	# Some distributions go through a lot more trouble to strip out
257	# unnecessary headers; for now, we just prune the obvious bits.
258 #
259 # We don't want to leave host-arch binaries in /sysroots, so
260 # we clean the scripts dir while leaving the generated config
261 # and include files.
262 #
263 oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean _mrproper_scripts
264
265 # hide directories that shouldn't have their .c, s and S files deleted
266 for d in tools scripts lib; do
267 mv $kerneldir/$d $kerneldir/.$d
268 done
269
270 # delete .c, .s and .S files, unless we hid a directory as .<dir>. This technique is
271 # much faster than find -prune and -exec
272 find $kerneldir -not -path '*/\.*' -type f -name "*.[csS]" -delete
273
274 # put the hidden dirs back
275 for d in tools scripts lib; do
276 mv $kerneldir/.$d $kerneldir/$d
277 done
278
279 # As of Linux kernel version 3.0.1, the clean target removes
280 # arch/powerpc/lib/crtsavres.o which is present in
281 # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
282 if [ ${ARCH} = "powerpc" ]; then
283 cp -l arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
284 fi
285
286 # Necessary for building modules like compat-wireless.
287 if [ -f include/generated/bounds.h ]; then
288 cp -l include/generated/bounds.h $kerneldir/include/generated/bounds.h
289 fi
290 if [ -d arch/${ARCH}/include/generated ]; then
291 mkdir -p $kerneldir/arch/${ARCH}/include/generated/
292 cp -flR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
293 fi
294
295 # Remove the following binaries which cause strip or arch QA errors
296 # during do_package for cross-compiled platforms
297 bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
298 arch/powerpc/boot/mktree scripts/kconfig/zconf.tab.o \
299 scripts/kconfig/conf.o scripts/kconfig/kxgettext.o"
300 for entry in $bin_files; do
301 rm -f $kerneldir/$entry
302 done
303
304 # kernels <2.6.30 don't have $kerneldir/tools directory so we check if it exists before calling sed
305 if [ -f $kerneldir/tools/perf/Makefile ]; then
306 # Fix SLANG_INC for slang.h
307 sed -i 's#-I/usr/include/slang#-I=/usr/include/slang#g' $kerneldir/tools/perf/Makefile
308 fi
309}
310do_install[prefuncs] += "package_get_auto_pr"
311
312python sysroot_stage_all () {
313 oe.path.copyhardlinktree(d.expand("${D}${KERNEL_SRC_PATH}"), d.expand("${SYSROOT_DESTDIR}${KERNEL_SRC_PATH}"))
314}
315
316KERNEL_CONFIG_COMMAND ?= "oe_runmake_call oldnoconfig || yes '' | oe_runmake oldconfig"
317
318kernel_do_configure() {
319 # fixes extra + in /lib/modules/2.6.37+
320 # $ scripts/setlocalversion . => +
321 # $ make kernelversion => 2.6.37
322 # $ make kernelrelease => 2.6.37+
323 touch ${B}/.scmversion ${S}/.scmversion
324
325 # Copy defconfig to .config if .config does not exist. This allows
326 # recipes to manage the .config themselves in do_configure_prepend().
327 if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
328 cp "${WORKDIR}/defconfig" "${B}/.config"
329 fi
330 eval ${KERNEL_CONFIG_COMMAND}
331}
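
# Illustrative example: a kernel recipe can supply the defconfig consumed
# above by shipping it through SRC_URI; the fetcher places it in ${WORKDIR},
# where kernel_do_configure picks it up:
#
#   SRC_URI += "file://defconfig"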
332
333do_savedefconfig() {
334 oe_runmake savedefconfig
335}
336do_savedefconfig[nostamp] = "1"
337addtask savedefconfig after do_configure
338
339inherit cml1
340
341EXPORT_FUNCTIONS do_compile do_install do_configure
342
343# kernel-base becomes kernel-${KERNEL_VERSION}
344# kernel-image becomes kernel-image-${KERNEL_VERSION}
345PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
346FILES_${PN} = ""
347FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
348FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
349FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH}"
350FILES_kernel-vmlinux = "/boot/vmlinux*"
351FILES_kernel-modules = ""
352RDEPENDS_kernel = "kernel-base"
353# Allow machines to override this dependency if kernel image files are
354# not wanted in images as standard
355RDEPENDS_kernel-base ?= "kernel-image"
356PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
357PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
358RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}"
359ALLOW_EMPTY_kernel = "1"
360ALLOW_EMPTY_kernel-base = "1"
361ALLOW_EMPTY_kernel-image = "1"
362ALLOW_EMPTY_kernel-modules = "1"
363DESCRIPTION_kernel-modules = "Kernel modules meta package"
364
365pkg_postinst_kernel-base () {
366 if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
367 mkdir -p $D/lib/modules/${KERNEL_VERSION}
368 fi
369 if [ -n "$D" ]; then
370 depmodwrapper -a -b $D ${KERNEL_VERSION}
371 else
372 depmod -a ${KERNEL_VERSION}
373 fi
374}
375
376pkg_postinst_kernel-image () {
377 update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
378}
379
380pkg_postrm_kernel-image () {
381 update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true
382}
383
384PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
385
386python split_kernel_packages () {
387 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.bin$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
388 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.fw$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
389 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.cis$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
390}
391
392do_strip() {
393 if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
394 if [[ "${KERNEL_IMAGETYPE}" != "vmlinux" ]]; then
395 bbwarn "image type will not be stripped (not supported): ${KERNEL_IMAGETYPE}"
396 return
397 fi
398
399 cd ${B}
400 headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT} | \
401 grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
402 sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
403 gawk '{print $1}'`
404
405 for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
406 if [[ "$headers" != *"$str"* ]]; then
407 bbwarn "Section not found: $str";
408 fi
409
410 "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT}
411 }; done
412
413 bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
414 "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
415 fi;
416}
417do_strip[dirs] = "${B}"
418
419addtask do_strip before do_sizecheck after do_kernel_link_vmlinux
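
# Illustrative machine config usage (only meaningful when KERNEL_IMAGETYPE is
# "vmlinux"; the section names below are examples):
#   KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment .unwanted_section"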
420
421# Support checking the kernel size since some kernels need to reside in partitions
422# with a fixed length or there is a limit in transferring the kernel to memory
423do_sizecheck() {
424 if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
425 cd ${B}
426 size=`ls -lL ${KERNEL_OUTPUT} | awk '{ print $5}'`
427 if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
428 die "This kernel (size=$size > ${KERNEL_IMAGE_MAXSIZE}) is too big for your device. Please reduce the size of the kernel by making more of it modular."
429 fi
430 fi
431}
432do_sizecheck[dirs] = "${B}"
433
434addtask sizecheck before do_install after do_strip
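
# Illustrative machine config usage: the check above compares the file size
# in bytes, so a 4 MiB limit would be:
#   KERNEL_IMAGE_MAXSIZE = "4194304"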
435
436KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
437# Don't include the DATETIME variable in the sstate package signatures
438KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
439KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}"
440MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
441MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
442MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz"
443# Don't include the DATETIME variable in the sstate package signatures
444MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
445MODULE_TARBALL_DEPLOY ?= "1"
446
447do_uboot_mkimage() {
448 if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
449 if test "x${KEEPUIMAGE}" != "xyes" ; then
450 ENTRYPOINT=${UBOOT_ENTRYPOINT}
451 if test -n "${UBOOT_ENTRYSYMBOL}"; then
452 ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
453 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
454 fi
455 if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
456 ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
457 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C none -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
458 rm -f linux.bin
459 else
460 ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
461 rm -f linux.bin.gz
462 gzip -9 linux.bin
463 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C gzip -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz arch/${ARCH}/boot/uImage
464 rm -f linux.bin.gz
465 fi
466 fi
467 fi
468}
469
470addtask uboot_mkimage before do_install after do_compile
471
472kernel_do_deploy() {
473 install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
474 if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
475 mkdir -p ${D}/lib
476 tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
477 ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME}
478 fi
479
480 ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGE_SYMLINK_NAME}.bin
481 ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGETYPE}
482
483 cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
484
485 cd ${B}
486 # Update deploy directory
487 if [ -e "${KERNEL_OUTPUT}.initramfs" ]; then
488 echo "Copying deploy kernel-initramfs image and setting up links..."
489 initramfs_base_name=${INITRAMFS_BASE_NAME}
490 initramfs_symlink_name=${KERNEL_IMAGETYPE}-initramfs-${MACHINE}
491 install -m 0644 ${KERNEL_OUTPUT}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin
492 cd ${DEPLOYDIR}
493 ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin
494 fi
495}
496do_deploy[dirs] = "${DEPLOYDIR} ${B}"
497do_deploy[prefuncs] += "package_get_auto_pr"
498
499addtask deploy before do_build after do_install
500
501EXPORT_FUNCTIONS do_deploy
502
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
new file mode 100644
index 0000000000..8849f59042
--- /dev/null
+++ b/meta/classes/lib_package.bbclass
@@ -0,0 +1,7 @@
1#
2# ${PN}-bin is defined in bitbake.conf
3#
4# We need to allow the other packages to be greedy with what they
5# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
6#
7PACKAGE_BEFORE_PN = "${PN}-bin"
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
new file mode 100644
index 0000000000..daf499d3eb
--- /dev/null
+++ b/meta/classes/libc-common.bbclass
@@ -0,0 +1,36 @@
1do_install() {
2 oe_runmake install_root=${D} install
3 for r in ${rpcsvc}; do
4 h=`echo $r|sed -e's,\.x$,.h,'`
5 install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
6 done
7 install -d ${D}/${sysconfdir}/
8 install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
9 install -d ${D}${localedir}
10 make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
11 # get rid of some broken files...
12 for i in ${GLIBC_BROKEN_LOCALES}; do
13 grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
14 mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
15 done
16 rm -f ${D}${sysconfdir}/rpc
17 rm -rf ${D}${datadir}/zoneinfo
18 rm -rf ${D}${libexecdir}/getconf
19}
20
21def get_libc_fpu_setting(bb, d):
22 if d.getVar('TARGET_FPU', True) in [ 'soft', 'ppc-efd' ]:
23 return "--without-fp"
24 return ""
25
26python populate_packages_prepend () {
27 if d.getVar('DEBIAN_NAMES', True):
28 bpn = d.getVar('BPN', True)
29 d.setVar('PKG_'+bpn, 'libc6')
30 d.setVar('PKG_'+bpn+'-dev', 'libc6-dev')
31 d.setVar('PKG_'+bpn+'-dbg', 'libc6-dbg')
32 # For backward compatibility with old -dbg package
33 d.appendVar('RPROVIDES_' + bpn + '-dbg', ' libc-dbg')
34 d.appendVar('RCONFLICTS_' + bpn + '-dbg', ' libc-dbg')
35 d.appendVar('RREPLACES_' + bpn + '-dbg', ' libc-dbg')
36}
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
new file mode 100644
index 0000000000..0af42a002c
--- /dev/null
+++ b/meta/classes/libc-package.bbclass
@@ -0,0 +1,390 @@
1#
2# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
3# may need packaging and it's pointless to duplicate this code.
4#
5# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
6# "compile" - Use QEMU to generate the binary locale files
7# "precompiled" - The binary locale files are pregenerated and already present
8# "ondevice" - The device will build the locale files upon first boot through the postinst
9
10GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
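
# Illustrative distro configuration for generating binary locales at build
# time with cross-localedef (values are examples; the variables are read by
# the anonymous python below):
#
#   ENABLE_BINARY_LOCALE_GENERATION = "1"
#   LOCALE_GENERATION_WITH_CROSS-LOCALEDEF = "1"
#   BINARY_LOCALE_ARCHES = "arm.* i[3-6]86 x86_64 powerpc mips mips64"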
11
12python __anonymous () {
13 enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
14
15 pn = d.getVar("PN", True)
16 if pn.endswith("-initial"):
17 enabled = False
18
19 if enabled and int(enabled):
20 import re
21
22 target_arch = d.getVar("TARGET_ARCH", True)
23 binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
24 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
25
26 for regexp in binary_arches.split(" "):
27 r = re.compile(regexp)
28
29 if r.match(target_arch):
30 depends = d.getVar("DEPENDS", True)
31 if use_cross_localedef == "1" :
32 depends = "%s cross-localedef-native" % depends
33 else:
34 depends = "%s qemu-native" % depends
35 d.setVar("DEPENDS", depends)
36 d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
37 break
38
39    # try to fix the compile failure when charsets/locales/locale-code are disabled
40 if oe.utils.contains('DISTRO_FEATURES', 'libc-charsets', True, False, d) and \
41 oe.utils.contains('DISTRO_FEATURES', 'libc-locales', True, False, d) and \
42 oe.utils.contains('DISTRO_FEATURES', 'libc-locale-code', True, False, d):
43 d.setVar('PACKAGE_NO_GCONV', '0')
44 else:
45 d.setVar('PACKAGE_NO_GCONV', '1')
46}
47
48OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
49
50do_configure_prepend() {
51 if [ -e ${S}/elf/ldd.bash.in ]; then
52 sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
53 fi
54}
55
56
57
58# indentation removed on purpose
59locale_base_postinst() {
60#!/bin/sh
61
62if [ "x$D" != "x" ]; then
63 exit 1
64fi
65
66rm -rf ${TMP_LOCALE}
67mkdir -p ${TMP_LOCALE}
68if [ -f ${localedir}/locale-archive ]; then
69 cp ${localedir}/locale-archive ${TMP_LOCALE}/
70fi
71localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
72mkdir -p ${localedir}/
73mv ${TMP_LOCALE}/locale-archive ${localedir}/
74rm -rf ${TMP_LOCALE}
75}
76
77# indentation removed on purpose
78locale_base_postrm() {
79#!/bin/sh
80
81rm -rf ${TMP_LOCALE}
82mkdir -p ${TMP_LOCALE}
83if [ -f ${localedir}/locale-archive ]; then
84 cp ${localedir}/locale-archive ${TMP_LOCALE}/
85fi
86localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
87mv ${TMP_LOCALE}/locale-archive ${localedir}/
88rm -rf ${TMP_LOCALE}
89}
90
91
92TMP_LOCALE="/tmp/locale${localedir}"
93LOCALETREESRC ?= "${PKGD}"
94
95do_prep_locale_tree() {
96 treedir=${WORKDIR}/locale-tree
97 rm -rf $treedir
98 mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
99 tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
100 # unzip to avoid parsing errors
101 for i in $treedir/${datadir}/i18n/charmaps/*gz; do
102 gunzip $i
103 done
104 tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
105 if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
106 tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
107 fi
108 install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
109}
110
111do_collect_bins_from_locale_tree() {
112 treedir=${WORKDIR}/locale-tree
113
114 parent=$(dirname ${localedir})
115 mkdir -p ${PKGD}/$parent
116 tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
117}
118
119inherit qemu
120
121python package_do_split_gconvs () {
122 import re
123 if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
124 bb.note("package requested not splitting gconvs")
125 return
126
127 if not d.getVar('PACKAGES', True):
128 return
129
130 mlprefix = d.getVar("MLPREFIX", True) or ""
131
132 bpn = d.getVar('BPN', True)
133 libdir = d.getVar('libdir', True)
134 if not libdir:
135 bb.error("libdir not defined")
136 return
137 datadir = d.getVar('datadir', True)
138 if not datadir:
139 bb.error("datadir not defined")
140 return
141
142 gconv_libdir = base_path_join(libdir, "gconv")
143 charmap_dir = base_path_join(datadir, "i18n", "charmaps")
144 locales_dir = base_path_join(datadir, "i18n", "locales")
145 binary_locales_dir = d.getVar('localedir', True)
146
147 def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
148 deps = []
149 f = open(fn, "rb")
150 c_re = re.compile('^copy "(.*)"')
151 i_re = re.compile('^include "(\w+)".*')
152 for l in f.readlines():
153 m = c_re.match(l) or i_re.match(l)
154 if m:
155 dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
156 if not dp in deps:
157 deps.append(dp)
158 f.close()
159 if deps != []:
160 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
161 if bpn != 'glibc':
162 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
163
164 do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
165 description='gconv module for character set %s', hook=calc_gconv_deps, \
166 extra_depends=bpn+'-gconv')
167
168 def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
169 deps = []
170 f = open(fn, "rb")
171 c_re = re.compile('^copy "(.*)"')
172 i_re = re.compile('^include "(\w+)".*')
173 for l in f.readlines():
174 m = c_re.match(l) or i_re.match(l)
175 if m:
176 dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
177 if not dp in deps:
178 deps.append(dp)
179 f.close()
180 if deps != []:
181 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
182 if bpn != 'glibc':
183 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
184
185 do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
186 description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
187
188 def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
189 deps = []
190 f = open(fn, "rb")
191 c_re = re.compile('^copy "(.*)"')
192 i_re = re.compile('^include "(\w+)".*')
193 for l in f.readlines():
194 m = c_re.match(l) or i_re.match(l)
195 if m:
196 dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
197 if not dp in deps:
198 deps.append(dp)
199 f.close()
200 if deps != []:
201 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
202 if bpn != 'glibc':
203 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
204
205 do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
206 description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
207 d.setVar('PACKAGES', d.getVar('PACKAGES') + ' ' + d.getVar('MLPREFIX') + bpn + '-gconv')
208
209 use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
210
211 dot_re = re.compile("(.*)\.(.*)")
212
213 # Read in supported locales and associated encodings
214 supported = {}
215 with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
216 for line in f.readlines():
217 try:
218 locale, charset = line.rstrip().split()
219 except ValueError:
220 continue
221 supported[locale] = charset
222
223    # The GLIBC_GENERATE_LOCALES variable specifies which locales to generate. Empty or "all" means all locales.
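    # e.g. GLIBC_GENERATE_LOCALES = "en_GB.UTF-8 en_US.UTF-8"  (illustrative)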
224 to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
225 if not to_generate or to_generate == 'all':
226 to_generate = supported.keys()
227 else:
228 to_generate = to_generate.split()
229 for locale in to_generate:
230 if locale not in supported:
231 if '.' in locale:
232 charset = locale.split('.')[1]
233 else:
234 charset = 'UTF-8'
235 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
236 supported[locale] = charset
237
238 def output_locale_source(name, pkgname, locale, encoding):
239 d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
240 (mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
241 d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
242 % (locale, encoding, locale))
243 d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
244 (locale, encoding, locale))
245
246 def output_locale_binary_rdepends(name, pkgname, locale, encoding):
247 m = re.match("(.*)\.(.*)", name)
248 if m:
249 libc_name = "%s.%s" % (m.group(1), m.group(2).lower())
250 else:
251 libc_name = name
252 d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
253 % (mlprefix+bpn, libc_name)))
254
255 commands = {}
256
257 def output_locale_binary(name, pkgname, locale, encoding):
258 treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
259 ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
260 path = d.getVar("PATH", True)
261 i18npath = base_path_join(treedir, datadir, "i18n")
262 gconvpath = base_path_join(treedir, "iconvdata")
263 outputpath = base_path_join(treedir, binary_locales_dir)
264
265 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
266 if use_cross_localedef == "1":
267 target_arch = d.getVar('TARGET_ARCH', True)
268 locale_arch_options = { \
269 "arm": " --uint32-align=4 --little-endian ", \
270 "armeb": " --uint32-align=4 --big-endian ", \
271 "aarch64_be": " --uint32-align=4 --big-endian ", \
272 "sh4": " --uint32-align=4 --big-endian ", \
273 "powerpc": " --uint32-align=4 --big-endian ", \
274 "powerpc64": " --uint32-align=4 --big-endian ", \
275 "mips": " --uint32-align=4 --big-endian ", \
276 "mips64": " --uint32-align=4 --big-endian ", \
277 "mipsel": " --uint32-align=4 --little-endian ", \
278 "mips64el":" --uint32-align=4 --little-endian ", \
279 "i586": " --uint32-align=4 --little-endian ", \
280 "i686": " --uint32-align=4 --little-endian ", \
281 "x86_64": " --uint32-align=4 --little-endian " }
282
283 if target_arch in locale_arch_options:
284 localedef_opts = locale_arch_options[target_arch]
285 else:
286 bb.error("locale_arch_options not found for target_arch=" + target_arch)
287 raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
288
289 localedef_opts += " --force --old-style --no-archive --prefix=%s \
290 --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
291 % (treedir, treedir, datadir, locale, encoding, outputpath, name)
292
293 cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
294 (path, i18npath, gconvpath, localedef_opts)
295 else: # earlier slower qemu way
296 qemu = qemu_target_binary(d)
297 localedef_opts = "--force --old-style --no-archive --prefix=%s \
298 --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
299 % (treedir, datadir, locale, encoding, name)
300
301 qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
302 if not qemu_options:
303 qemu_options = d.getVar('QEMU_OPTIONS', True)
304
305 cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
306 -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
307 (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
308
309 commands["%s/%s" % (outputpath, name)] = cmd
310
311 bb.note("generating locale %s (%s)" % (locale, encoding))
312
313 def output_locale(name, locale, encoding):
314 pkgname = d.getVar('MLPREFIX') + 'locale-base-' + legitimize_package_name(name)
315 d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
316 d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
317 rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
318 m = re.match("(.*)_(.*)", name)
319 if m:
320 rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
321 d.setVar('RPROVIDES_%s' % pkgname, rprovides)
322
323 if use_bin == "compile":
324 output_locale_binary_rdepends(name, pkgname, locale, encoding)
325 output_locale_binary(name, pkgname, locale, encoding)
326 elif use_bin == "precompiled":
327 output_locale_binary_rdepends(name, pkgname, locale, encoding)
328 else:
329 output_locale_source(name, pkgname, locale, encoding)
330
331 if use_bin == "compile":
332 bb.note("preparing tree for binary locale generation")
333 bb.build.exec_func("do_prep_locale_tree", d)
334
335 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
336 encodings = {}
337 for locale in to_generate:
338 charset = supported[locale]
339 if utf8_only and charset != 'UTF-8':
340 continue
341
342 m = dot_re.match(locale)
343 if m:
344 base = m.group(1)
345 else:
346 base = locale
347
348 # Precompiled locales are kept as is, obeying SUPPORTED, while
349 # others are adjusted, ensuring that the non-suffixed locales
350 # are utf-8, while the suffixed are not.
351 if use_bin == "precompiled":
352 output_locale(locale, base, charset)
353 else:
354 if charset == 'UTF-8':
355 output_locale(base, base, charset)
356 else:
357 output_locale('%s.%s' % (base, charset), base, charset)
358
359 if use_bin == "compile":
360 makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
361 m = open(makefile, "w")
362 m.write("all: %s\n\n" % " ".join(commands.keys()))
363 for cmd in commands:
364 m.write(cmd + ":\n")
365 m.write("\t" + commands[cmd] + "\n\n")
366 m.close()
367 d.setVar("B", os.path.dirname(makefile))
368 d.setVar("EXTRA_OEMAKE", "${PARALLEL_MAKE}")
369 bb.note("Executing binary locale generation makefile")
370 bb.build.exec_func("oe_runmake", d)
371 bb.note("collecting binary locales from locale tree")
372 bb.build.exec_func("do_collect_bins_from_locale_tree", d)
373 do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
374 output_pattern=bpn+'-binary-localedata-%s', \
375 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
376 elif use_bin == "precompiled":
377 do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
378 output_pattern=bpn+'-binary-localedata-%s', \
379 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
380 else:
381 bb.note("generation of binary locales disabled. this may break i18n!")
382
383}
384
385# We want to do this indirection so that we can safely 'return'
386# from the called function even though we're prepending
387python populate_packages_prepend () {
388 bb.build.exec_func('package_do_split_gconvs', d)
389}
390
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
new file mode 100644
index 0000000000..08f0665e7d
--- /dev/null
+++ b/meta/classes/license.bbclass
@@ -0,0 +1,373 @@
1# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
2# LIC_FILES_CHKSUM.
3# TODO:
4# - There is a real issue revolving around license naming standards.
5
6LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
7LICSSTATEDIR = "${WORKDIR}/license-destdir/"
8
9# Create extra package with license texts and add it to RRECOMMENDS_${PN}
10LICENSE_CREATE_PACKAGE[type] = "boolean"
11LICENSE_CREATE_PACKAGE ??= "0"
12LICENSE_PACKAGE_SUFFIX ??= "-lic"
13LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
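
# Illustrative local.conf usage: with
#   LICENSE_CREATE_PACKAGE = "1"
# each target recipe gains a ${PN}${LICENSE_PACKAGE_SUFFIX} package (e.g.
# "foo-lic") holding its license texts, added to RRECOMMENDS_${PN} below.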
14
15addtask populate_lic after do_patch before do_build
16do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
17do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
18
19python write_package_manifest() {
20 # Get list of installed packages
21 license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
22 bb.utils.mkdirhier(license_image_dir)
23 from oe.rootfs import image_list_installed_packages
24 open(os.path.join(license_image_dir, 'package.manifest'),
25 'w+').write(image_list_installed_packages(d))
26}
27
28license_create_manifest() {
29 # Test if BUILD_IMAGES_FROM_FEEDS is defined in env
30 if [ -n "${BUILD_IMAGES_FROM_FEEDS}" ]; then
31 exit 0
32 fi
33
34 INSTALLED_PKGS=`cat ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest`
35 LICENSE_MANIFEST="${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest"
36 # remove existing license.manifest file
37 if [ -f ${LICENSE_MANIFEST} ]; then
38 rm ${LICENSE_MANIFEST}
39 fi
40 touch ${LICENSE_MANIFEST}
41 for pkg in ${INSTALLED_PKGS}; do
42 filename=`ls ${PKGDATA_DIR}/runtime-reverse/${pkg}| head -1`
43 pkged_pn="$(sed -n 's/^PN: //p' ${filename})"
44
45 # check to see if the package name exists in the manifest. if so, bail.
46 if grep -q "^PACKAGE NAME: ${pkg}" ${LICENSE_MANIFEST}; then
47 continue
48 fi
49
50 pkged_pv="$(sed -n 's/^PV: //p' ${filename})"
51 pkged_name="$(basename $(readlink ${filename}))"
52 pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
53		if [ -z "${pkged_lic}" ]; then
54 # fallback checking value of LICENSE
55 pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
56 fi
57
58 echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_MANIFEST}
59 echo "PACKAGE VERSION:" ${pkged_pv} >> ${LICENSE_MANIFEST}
60 echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_MANIFEST}
61 printf "LICENSE:" >> ${LICENSE_MANIFEST}
62 for lic in ${pkged_lic}; do
63 # to reference a license file trim trailing + symbol
64 if ! [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic%+}" ]; then
65 bbwarn "The license listed ${lic} was not in the licenses collected for ${pkged_pn}"
66 fi
67 printf " ${lic}" >> ${LICENSE_MANIFEST}
68 done
69 printf "\n\n" >> ${LICENSE_MANIFEST}
70 done
71
72 # Two options here:
73 # - Just copy the manifest
74 # - Copy the manifest and the license directories
75	# With both options set we see a 0.5 MB increase in core-image-minimal
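	# Illustrative image settings enabling the two options:
	#   COPY_LIC_MANIFEST = "1"
	#   COPY_LIC_DIRS = "1"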
76 if [ "${COPY_LIC_MANIFEST}" = "1" ]; then
77 mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/
78 cp ${LICENSE_MANIFEST} ${IMAGE_ROOTFS}/usr/share/common-licenses/license.manifest
79 if [ "${COPY_LIC_DIRS}" = "1" ]; then
80 for pkg in ${INSTALLED_PKGS}; do
81 mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
82 pkged_pn="$(oe-pkgdata-util lookup-recipe ${PKGDATA_DIR} ${pkg})"
83 for lic in `ls ${LICENSE_DIRECTORY}/${pkged_pn}`; do
84 # Really don't need to copy the generics as they're
85					# represented in the manifest and in the actual pkg licenses.
86 # Doing so would make your image quite a bit larger
87 if [ "${lic#generic_}" = "${lic}" ]; then
88 cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
89 else
90 if [ ! -f ${IMAGE_ROOTFS}/usr/share/common-licenses/${lic} ]; then
91 cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/
92 fi
93 ln -sf ../${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
94 fi
95 done
96 done
97 fi
98 fi
99
100}
101
102python do_populate_lic() {
103 """
104 Populate LICENSE_DIRECTORY with licenses.
105 """
106 lic_files_paths = find_license_files(d)
107
108 # The base directory we wrangle licenses to
109 destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
110 copy_license_files(lic_files_paths, destdir)
111}
112
113# it would be better to copy them in do_install_append, but find_license_files is python
114python perform_packagecopy_prepend () {
115 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
116 if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled:
117 lic_files_paths = find_license_files(d)
118
119 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
120 destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True))
121 copy_license_files(lic_files_paths, destdir)
122 add_package_and_files(d)
123}
124
125def add_package_and_files(d):
126 packages = d.getVar('PACKAGES', True)
127 files = d.getVar('LICENSE_FILES_DIRECTORY', True)
128 pn = d.getVar('PN', True)
129 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX'))
130 if pn_lic in packages:
131 bb.warn("%s package already existed in %s." % (pn_lic, pn))
132 else:
133 # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
134 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
135 d.setVar('FILES_' + pn_lic, files)
136 rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True)
137 if rrecommends_pn:
138 d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
139 else:
140 d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
141
142def copy_license_files(lic_files_paths, destdir):
143 import shutil
144
145 bb.utils.mkdirhier(destdir)
146 for (basename, path) in lic_files_paths:
147 try:
148 ret = shutil.copyfile(path, os.path.join(destdir, basename))
149 except Exception as e:
150 bb.warn("Could not copy license file %s: %s" % (basename, e))
151
152def find_license_files(d):
153 """
154 Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
155 """
156 import shutil
157 import oe.license
158
159 pn = d.getVar('PN', True)
160    license_types = None
161    for package in d.getVar('PACKAGES', True).split():
162        if d.getVar('LICENSE_' + package, True):
163            license_types = ((license_types + ' & ') if license_types else '') + \
164                            d.getVar('LICENSE_' + package, True)
165
166    # If we get here with no license types, then that means we have a recipe
167    # level license. If so, we grab only those.
168    if not license_types:
169        # All the license types at the recipe level
170        license_types = d.getVar('LICENSE', True)
171
172
173 # All the license files for the package
174 lic_files = d.getVar('LIC_FILES_CHKSUM', True)
175 pn = d.getVar('PN', True)
176    # The license files live in ${S} at the paths given in LIC_FILES_CHKSUM.
177 srcdir = d.getVar('S', True)
178 # Directory we store the generic licenses as set in the distro configuration
179 generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
180 # List of basename, path tuples
181 lic_files_paths = []
182 license_source_dirs = []
183 license_source_dirs.append(generic_directory)
184 try:
185 additional_lic_dirs = d.getVar('LICENSE_PATH', True).split()
186 for lic_dir in additional_lic_dirs:
187 license_source_dirs.append(lic_dir)
188 except:
189 pass
190
191 class FindVisitor(oe.license.LicenseVisitor):
192 def visit_Str(self, node):
193 #
194            # Until I figure out what to do with
195            # the two modifiers I support ("or greater" = '+'
196            # and "with exceptions" = '*'),
197            # we'll just strip out the modifier and use
198            # the base license.
199 find_license(node.s.replace("+", "").replace("*", ""))
200 self.generic_visit(node)
201
202 def find_license(license_type):
203 try:
204 bb.utils.mkdirhier(gen_lic_dest)
205 except:
206 pass
207 spdx_generic = None
208 license_source = None
209 # If the generic does not exist we need to check to see if there is an SPDX mapping to it
210 for lic_dir in license_source_dirs:
211 if not os.path.isfile(os.path.join(lic_dir, license_type)):
212 if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
213 # Great, there is an SPDXLICENSEMAP. We can copy!
214 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
215 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
216 license_source = lic_dir
217 break
218 elif os.path.isfile(os.path.join(lic_dir, license_type)):
219 spdx_generic = license_type
220 license_source = lic_dir
221 break
222
223 if spdx_generic and license_source:
224 # we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
225                # audit up. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
226
227 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic)))
228 else:
229 # And here is where we warn people that their licenses are lousy
230 bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
231 pass
232
233 if not generic_directory:
234 raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
235
236 if not lic_files:
237 # No recipe should have an invalid license file. This is checked else
238 # where, but let's be pedantic
239 bb.note(pn + ": Recipe file does not have license file information.")
240 return lic_files_paths
241
242 for url in lic_files.split():
243 try:
244 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
245 except bb.fetch.MalformedUrl:
246 raise bb.build.FuncFailed("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
247 # We want the license filename and path
248 srclicfile = os.path.join(srcdir, path)
249 lic_files_paths.append((os.path.basename(path), srclicfile))
250
251 v = FindVisitor()
252 try:
253 v.visit_string(license_types)
254 except oe.license.InvalidLicense as exc:
255 bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
256 except SyntaxError:
257        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF', True)))
258
259 return lic_files_paths
260
261def return_spdx(d, license):
262 """
263 This function returns the spdx mapping of a license if it exists.
264 """
265 return d.getVarFlag('SPDXLICENSEMAP', license, True)
266
267def incompatible_license(d, dont_want_licenses, package=None):
268 """
269    This function checks if a recipe has only incompatible licenses. It also takes the 'or'
270    operand into consideration.
271 """
272 import re
273 import oe.license
274 from fnmatch import fnmatchcase as fnmatch
275 license = d.getVar("LICENSE_%s" % package, True) if package else None
276 if not license:
277 license = d.getVar('LICENSE', True)
278
279 def license_ok(license):
280 for dwl in dont_want_licenses:
281 # If you want to exclude license named generically 'X', we
282 # surely want to exclude 'X+' as well. In consequence, we
283 # will exclude a trailing '+' character from LICENSE in
284 # case INCOMPATIBLE_LICENSE is not a 'X+' license.
285 lic = license
286 if not re.search('\+$', dwl):
287 lic = re.sub('\+', '', license)
288 if fnmatch(lic, dwl):
289 return False
290 return True
291
292 # Handles an "or" or two license sets provided by
293 # flattened_licenses(), pick one that works if possible.
294 def choose_lic_set(a, b):
295 return a if all(license_ok(lic) for lic in a) else b
296
297 try:
298 licenses = oe.license.flattened_licenses(license, choose_lic_set)
299 except oe.license.LicenseError as exc:
300 bb.fatal('%s: %s' % (d.getVar('P', True), exc))
301 return any(not license_ok(l) for l in licenses)
302
303def check_license_flags(d):
304 """
305 This function checks if a recipe has any LICENSE_FLAGS that
306 aren't whitelisted.
307
308 If it does, it returns the first LICENSE_FLAGS item missing from the
309 whitelist, or all of the LICENSE_FLAGS if there is no whitelist.
310
311    If everything is properly whitelisted, it returns None.
312 """
313
314 def license_flag_matches(flag, whitelist, pn):
315 """
316 Return True if flag matches something in whitelist, None if not.
317
318 Before we test a flag against the whitelist, we append _${PN}
319 to it. We then try to match that string against the
320 whitelist. This covers the normal case, where we expect
321 LICENSE_FLAGS to be a simple string like 'commercial', which
322 the user typically matches exactly in the whitelist by
323        explicitly appending the package name, e.g. 'commercial_foo'.
324 If we fail the match however, we then split the flag across
325 '_' and append each fragment and test until we either match or
326 run out of fragments.
327 """
328 flag_pn = ("%s_%s" % (flag, pn))
329 for candidate in whitelist:
330 if flag_pn == candidate:
331 return True
332
333 flag_cur = ""
334 flagments = flag_pn.split("_")
335 flagments.pop() # we've already tested the full string
336 for flagment in flagments:
337 if flag_cur:
338 flag_cur += "_"
339 flag_cur += flagment
340 for candidate in whitelist:
341 if flag_cur == candidate:
342 return True
343 return False
344
345 def all_license_flags_match(license_flags, whitelist):
346 """ Return first unmatched flag, None if all flags match """
347 pn = d.getVar('PN', True)
348 split_whitelist = whitelist.split()
349 for flag in license_flags.split():
350 if not license_flag_matches(flag, split_whitelist, pn):
351 return flag
352 return None
353
354 license_flags = d.getVar('LICENSE_FLAGS', True)
355 if license_flags:
356 whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
357 if not whitelist:
358 return license_flags
359 unmatched_flag = all_license_flags_match(license_flags, whitelist)
360 if unmatched_flag:
361 return unmatched_flag
362 return None
363
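# Illustrative example of the matching above: a recipe with PN "foo" and
# LICENSE_FLAGS = "commercial" passes if the distro whitelist contains either
# the exact "commercial_foo" or the bare fragment "commercial":
#
#   LICENSE_FLAGS_WHITELIST = "commercial_foo"
#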
364SSTATETASKS += "do_populate_lic"
365do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
366do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
367
368ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
369
370python do_populate_lic_setscene () {
371 sstate_setscene(d)
372}
373addtask do_populate_lic_setscene
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
new file mode 100644
index 0000000000..4f2b0a4a98
--- /dev/null
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -0,0 +1,32 @@
1# parse kernel ABI version out of <linux/version.h>
2def get_kernelversion(p):
3 import re
4
5 fn = p + '/include/linux/utsrelease.h'
6 if not os.path.isfile(fn):
7 # after 2.6.33-rc1
8 fn = p + '/include/generated/utsrelease.h'
9 if not os.path.isfile(fn):
10 fn = p + '/include/linux/version.h'
11
12
13 try:
14 f = open(fn, 'r')
15 except IOError:
16 return None
17
18 l = f.readlines()
19 f.close()
20 r = re.compile("#define UTS_RELEASE \"(.*)\"")
21 for s in l:
22 m = r.match(s)
23 if m:
24 return m.group(1)
25 return None
26
27def linux_module_packages(s, d):
28 suffix = ""
29 return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
30
31# that's all
32
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
new file mode 100644
index 0000000000..78d65bda3a
--- /dev/null
+++ b/meta/classes/logging.bbclass
@@ -0,0 +1,72 @@
1# The following logging mechanisms are to be used in bash functions of recipes.
2# They are intended to map one to one in intention and output format with the
3# python recipe logging functions of a similar naming convention: bb.plain(),
4# bb.note(), etc.
5#
6# For the time being, all of these print only to the task logs. Future
7# enhancements may integrate these calls with the bitbake logging
8# infrastructure, allowing for printing to the console as appropriate. The
9# interface and intention statements reflect that future goal. Once it is
10# in place, no changes will be necessary to recipes using these logging
11# mechanisms.
12
13# Print the output exactly as it is passed in. Typically used for output of
14# tasks that should be seen on the console. Use sparingly.
15# Output: logs console
16# NOTE: console output is not currently implemented.
17bbplain() {
18 echo "$*"
19}
20
21# Notify the user of a noteworthy condition.
22# Output: logs console
23# NOTE: console output is not currently implemented.
24bbnote() {
25 echo "NOTE: $*"
26}
27
28# Print a warning to the log. Warnings are non-fatal, and do not
29# indicate a build failure.
30# Output: logs
31bbwarn() {
32 echo "WARNING: $*"
33}
34
35# Print an error to the log. Errors are non-fatal in that the build can
36# continue, but they do indicate a build failure.
37# Output: logs
38bberror() {
39 echo "ERROR: $*"
40}
41
42# Print a fatal error to the log. Fatal errors indicate build failure
43# and halt the build, exiting with an error code.
44# Output: logs
45bbfatal() {
46 echo "ERROR: $*"
47 exit 1
48}
49
50# Print debug messages. These are appropriate for progress checkpoint
51# messages to the logs. Depending on the debug log level, they may also
52# go to the console.
53# Output: logs console
54# Usage: bbdebug 1 "first level debug message"
55# bbdebug 2 "second level debug message"
56# NOTE: console output is not currently implemented.
57bbdebug() {
58 USAGE='Usage: bbdebug [123] "message"'
59 if [ $# -lt 2 ]; then
60 bbfatal "$USAGE"
61 fi
62
63 # Strip off the debug level and ensure it is an integer
64 DBGLVL=$1; shift
65	if ! [[ "$DBGLVL" =~ ^[0-9]+$ ]]; then
66 bbfatal "$USAGE"
67 fi
68
69 # All debug output is printed to the logs
70 echo "DEBUG: $*"
71}
72
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
new file mode 100644
index 0000000000..d35c40bccd
--- /dev/null
+++ b/meta/classes/meta.bbclass
@@ -0,0 +1,4 @@
1
2PACKAGES = ""
3
4do_build[recrdeptask] = "do_build"
\ No newline at end of file
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
new file mode 100644
index 0000000000..ba0edf9486
--- /dev/null
+++ b/meta/classes/metadata_scm.bbclass
@@ -0,0 +1,82 @@
1METADATA_BRANCH ?= "${@base_detect_branch(d)}"
2METADATA_REVISION ?= "${@base_detect_revision(d)}"
3
4def base_detect_revision(d):
5 path = base_get_scmbasepath(d)
6
7 scms = [base_get_metadata_git_revision, \
8 base_get_metadata_svn_revision]
9
10 for scm in scms:
11 rev = scm(path, d)
12 if rev != "<unknown>":
13 return rev
14
15 return "<unknown>"
16
17def base_detect_branch(d):
18 path = base_get_scmbasepath(d)
19
20 scms = [base_get_metadata_git_branch]
21
22 for scm in scms:
23 rev = scm(path, d)
24 if rev != "<unknown>":
25 return rev.strip()
26
27 return "<unknown>"
28
29def base_get_scmbasepath(d):
30 return d.getVar( 'COREBASE', True)
31
32def base_get_metadata_monotone_branch(path, d):
33 monotone_branch = "<unknown>"
34 try:
35 with open("%s/_MTN/options" % path) as f:
36 monotone_branch = f.read().strip()
37 if monotone_branch.startswith( "database" ):
38 monotone_branch_words = monotone_branch.split()
39 monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
40 except:
41 pass
42 return monotone_branch
43
44def base_get_metadata_monotone_revision(path, d):
45 monotone_revision = "<unknown>"
46 try:
47 with open("%s/_MTN/revision" % path) as f:
48 monotone_revision = f.read().strip()
49 if monotone_revision.startswith( "format_version" ):
50 monotone_revision_words = monotone_revision.split()
51 monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
52 except IOError:
53 pass
54 return monotone_revision
55
56def base_get_metadata_svn_revision(path, d):
57 # This only works with older subversion. For newer versions
58 # this function will need to be fixed by someone interested
59 revision = "<unknown>"
60 try:
61 with open("%s/.svn/entries" % path) as f:
62 revision = f.readlines()[3].strip()
63    except (IOError, IndexError):
64 pass
65 return revision
66
67def base_get_metadata_git_branch(path, d):
68 branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read()
69
70 if len(branch) != 0:
71 return branch
72 return "<unknown>"
73
74def base_get_metadata_git_revision(path, d):
75 f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path)
76 data = f.read()
77 if f.close() is None:
78 rev = data.split(" ")[0]
79 if len(rev) != 0:
80 return rev
81 return "<unknown>"
82
diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass
new file mode 100644
index 0000000000..aa0df8bb76
--- /dev/null
+++ b/meta/classes/migrate_localcount.bbclass
@@ -0,0 +1,46 @@
1PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
2LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
3
4python migrate_localcount_handler () {
5 import bb.event
6 if not e.data:
7 return
8
9 pv = e.data.getVar('PV', True)
10 if not 'AUTOINC' in pv:
11 return
12
13 localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
14 pn = e.data.getVar('PN', True)
15 revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
16 counts = localcounts.get_by_pattern('%%-%s_count' % pn)
17 if not revs or not counts:
18 return
19
20 if len(revs) != len(counts):
21 bb.warn("The number of revs and localcounts don't match in %s" % pn)
22 return
23
24 version = e.data.getVar('PRAUTOINX', True)
25 srcrev = bb.fetch2.get_srcrev(e.data)
26 base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
27 pkgarch = e.data.getVar('PACKAGE_ARCH', True)
28 value = max(int(count) for count in counts)
29
30 if len(revs) == 1:
31 if srcrev != ('AUTOINC+%s' % revs[0]):
32 value += 1
33 else:
34 value += 1
35
36 bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True))
37 df = e.data.getVar('LOCALCOUNT_DUMPFILE', True)
38 flock = bb.utils.lockfile("%s.lock" % df)
39 with open(df, 'a') as fd:
40 fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
41 (base_ver, pkgarch, srcrev, str(value)))
42 bb.utils.unlockfile(flock)
43}
44
45addhandler migrate_localcount_handler
46migrate_localcount_handler[eventmask] = "bb.event.RecipeParsed"
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
new file mode 100644
index 0000000000..721c73fcff
--- /dev/null
+++ b/meta/classes/mime.bbclass
@@ -0,0 +1,56 @@
1DEPENDS += "shared-mime-info-native shared-mime-info"
2
3mime_postinst() {
4if [ "$1" = configure ]; then
5 UPDATEMIMEDB=`which update-mime-database`
6 if [ -x "$UPDATEMIMEDB" ] ; then
7 echo "Updating MIME database... this may take a while."
8 $UPDATEMIMEDB $D${datadir}/mime
9 else
10 echo "Missing update-mime-database, update of mime database failed!"
11 exit 1
12 fi
13fi
14}
15
16mime_postrm() {
17if [ "$1" = remove ] || [ "$1" = upgrade ]; then
18 UPDATEMIMEDB=`which update-mime-database`
19 if [ -x "$UPDATEMIMEDB" ] ; then
20 echo "Updating MIME database... this may take a while."
21 $UPDATEMIMEDB $D${datadir}/mime
22 else
23 echo "Missing update-mime-database, update of mime database failed!"
24 exit 1
25 fi
26fi
27}
28
29python populate_packages_append () {
30 import re
31 packages = d.getVar('PACKAGES', True).split()
32 pkgdest = d.getVar('PKGDEST', True)
33
34 for pkg in packages:
35 mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
36 mimes = []
37 mime_re = re.compile(".*\.xml$")
38 if os.path.exists(mime_dir):
39 for f in os.listdir(mime_dir):
40 if mime_re.match(f):
41 mimes.append(f)
42 if mimes:
43 bb.note("adding mime postinst and postrm scripts to %s" % pkg)
44 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
45 if not postinst:
46 postinst = '#!/bin/sh\n'
47 postinst += d.getVar('mime_postinst', True)
48 d.setVar('pkg_postinst_%s' % pkg, postinst)
49 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
50 if not postrm:
51 postrm = '#!/bin/sh\n'
52 postrm += d.getVar('mime_postrm', True)
53 d.setVar('pkg_postrm_%s' % pkg, postrm)
54 bb.note("adding shared-mime-info-data dependency to %s" % pkg)
55 d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
56}
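The getVar/append/setVar sequence above is the usual way to chain a scriptlet fragment onto a package's existing postinst or postrm. Factored into a helper it would look roughly like this (a sketch; mime.bbclass keeps it inline):

    def append_scriptlet(d, pkg, varname, fragment):
        # Fetch any existing scriptlet, seed a shebang if there is none,
        # then append the fragment and store the result back.
        script = d.getVar('%s_%s' % (varname, pkg), True)
        if not script:
            script = '#!/bin/sh\n'
        d.setVar('%s_%s' % (varname, pkg), script + fragment)

    # e.g. append_scriptlet(d, pkg, 'pkg_postinst', d.getVar('mime_postinst', True))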
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
new file mode 100644
index 0000000000..a626669315
--- /dev/null
+++ b/meta/classes/mirrors.bbclass
@@ -0,0 +1,78 @@
1MIRRORS += "\
2${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
3${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
4${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
5${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
6${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
7${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
8${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
9${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
10${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
11${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
12${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
13${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
14${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
15${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
16${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
17${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
18${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
19${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
20${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
21${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
22${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
23${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
24ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
25ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
26ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
27ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
28ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
29ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
30ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
31ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/ \n \
32ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/ \n \
33ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/ \n \
34http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
35http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
36ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
37ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
38ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
39ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
40ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
41ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
42ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
43ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
44ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
45ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
46http://www.apache.org/dist http://archive.apache.org/dist \n \
47http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
48cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
49svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
50git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
51hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
52bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
53svk://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
54p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
55osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
56https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
57ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
58cvs://.*/.* http://sources.openembedded.org/ \n \
59svn://.*/.* http://sources.openembedded.org/ \n \
60git://.*/.* http://sources.openembedded.org/ \n \
61hg://.*/.* http://sources.openembedded.org/ \n \
62bzr://.*/.* http://sources.openembedded.org/ \n \
63svk://.*/.* http://sources.openembedded.org/ \n \
64p4://.*/.* http://sources.openembedded.org/ \n \
65osc://.*/.* http://sources.openembedded.org/ \n \
66https?$://.*/.* http://sources.openembedded.org/ \n \
67ftp://.*/.* http://sources.openembedded.org/ \n \
68cvs://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
69svn://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
70git://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
71hg://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
72bzr://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
73svk://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
74p4://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
75osc://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
76https?$://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
77ftp://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
78"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
new file mode 100644
index 0000000000..9537ba9f43
--- /dev/null
+++ b/meta/classes/module-base.bbclass
@@ -0,0 +1,18 @@
1inherit kernel-arch
2
3export OS = "${TARGET_OS}"
4export CROSS_COMPILE = "${TARGET_PREFIX}"
5
6export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
7KERNEL_OBJECT_SUFFIX = ".ko"
8
9# kernel modules are generally machine specific
10PACKAGE_ARCH = "${MACHINE_ARCH}"
11
12# Function to ensure the kernel scripts are created. Expected to
13# be called before do_compile. See module.bbclass for an example.
14do_make_scripts() {
15 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
16 make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
17 -C ${STAGING_KERNEL_DIR} scripts
18}
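KERNEL_VERSION above is filled in by base_read_file(), defined in base.bbclass; its behaviour amounts to returning the stripped contents of a small metadata file. A sketch of that behaviour (not necessarily the exact upstream definition):

    def base_read_file(filename):
        # Return the stripped contents of e.g. kernel-abiversion;
        # an unreadable file yields the empty string.
        try:
            with open(filename) as f:
                return f.read().strip()
        except IOError:
            return ""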
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
new file mode 100644
index 0000000000..ad6f7af1bb
--- /dev/null
+++ b/meta/classes/module.bbclass
@@ -0,0 +1,32 @@
1DEPENDS += "virtual/kernel"
2
3inherit module-base kernel-module-split
4
5addtask make_scripts after do_patch before do_compile
6do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
7do_make_scripts[deptask] = "do_populate_sysroot"
8
9module_do_compile() {
10 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
11 oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
12 KERNEL_SRC=${STAGING_KERNEL_DIR} \
13 KERNEL_VERSION=${KERNEL_VERSION} \
14 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
15 AR="${KERNEL_AR}" \
16 ${MAKE_TARGETS}
17}
18
19module_do_install() {
20 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
21 oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
22 KERNEL_SRC=${STAGING_KERNEL_DIR} \
23 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
24 modules_install
25}
26
27EXPORT_FUNCTIONS do_compile do_install
28
29# add all split modules to PN RDEPENDS, PN can be empty now
30KERNEL_MODULES_META_PACKAGE = "${PN}"
31FILES_${PN} = ""
32ALLOW_EMPTY_${PN} = "1"
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
new file mode 100644
index 0000000000..9a1cb1d916
--- /dev/null
+++ b/meta/classes/multilib.bbclass
@@ -0,0 +1,141 @@
1python multilib_virtclass_handler () {
2 cls = e.data.getVar("BBEXTENDCURR", True)
3 variant = e.data.getVar("BBEXTENDVARIANT", True)
4 if cls != "multilib" or not variant:
5 return
6
7 e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True))
8
9 # There should only be one kernel in multilib configs
10 # We also skip multilib setup for module packages.
11 provides = (e.data.getVar("PROVIDES", True) or "").split()
12 if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
13 raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
14
15 save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
16 for name in save_var_name.split():
17 val=e.data.getVar(name, True)
18 if val:
19 e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
20
21 if bb.data.inherits_class('image', e.data):
22 e.data.setVar("MLPREFIX", variant + "-")
23 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
24 return
25
26 if bb.data.inherits_class('cross-canadian', e.data):
27 e.data.setVar("MLPREFIX", variant + "-")
28 override = ":virtclass-multilib-" + variant
29 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
30 bb.data.update_data(e.data)
31 return
32
33 if bb.data.inherits_class('native', e.data):
34 raise bb.parse.SkipPackage("We can't extend native recipes")
35
36 if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
37 raise bb.parse.SkipPackage("We can't extend nativesdk recipes")
38
39 if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
40 raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups")
41
42
43 # Expand this since this won't work correctly once we set a multilib into place
44 e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
45
46 override = ":virtclass-multilib-" + variant
47
48 e.data.setVar("MLPREFIX", variant + "-")
49 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
50 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
51
52 # Expand the WHITELISTs with multilib prefix
53 for whitelist in ["HOSTTOOLS_WHITELIST_GPLv3", "WHITELIST_GPLv3", "LGPLv2_WHITELIST_GPLv3"]:
54 pkgs = e.data.getVar(whitelist, True)
55 for pkg in pkgs.split():
56 pkgs += " " + variant + "-" + pkg
57 e.data.setVar(whitelist, pkgs)
58
59 # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
60 newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
61 if newtune:
62 e.data.setVar("DEFAULTTUNE", newtune)
63}
64
65addhandler multilib_virtclass_handler
66multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
67
68STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
69
70python __anonymous () {
71 variant = d.getVar("BBEXTENDVARIANT", True)
72
73 import oe.classextend
74
75 clsextend = oe.classextend.ClassExtender(variant, d)
76
77 if bb.data.inherits_class('image', d):
78 clsextend.map_depends_variable("PACKAGE_INSTALL")
79 clsextend.map_depends_variable("LINGUAS_INSTALL")
80 clsextend.map_depends_variable("RDEPENDS")
81 pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
82 d.setVar("PACKAGE_INSTALL", pinstall)
83 d.setVar("LINGUAS_INSTALL", "")
84 # FIXME, we need to map this to something, not delete it!
85 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
86
87 if bb.data.inherits_class('populate_sdk_base', d):
88 clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK")
89 clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY")
90
91 if bb.data.inherits_class('image', d):
92 return
93
94 clsextend.map_depends_variable("DEPENDS")
95 clsextend.map_variable("PROVIDES")
96
97 if bb.data.inherits_class('cross-canadian', d):
98 return
99
100 clsextend.rename_packages()
101 clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
102
103 clsextend.map_packagevars()
104 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
105 clsextend.map_variable("PACKAGE_INSTALL")
106 clsextend.map_variable("INITSCRIPT_PACKAGES")
107 clsextend.map_variable("USERADD_PACKAGES")
108 clsextend.map_variable("SYSTEMD_PACKAGES")
109}
110
111PACKAGEFUNCS_append = " do_package_qa_multilib"
112
113python do_package_qa_multilib() {
114
115 def check_mlprefix(pkg, var, mlprefix):
116 values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
117 candidates = []
118 for i in values:
119 if i.startswith('virtual/'):
120 i = i[len('virtual/'):]
121 if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
122 (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
123 (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')):
124 candidates.append(i)
125 if len(candidates) > 0:
126 bb.warn("Multilib QA Issue: %s package %s - suspicious values '%s' in %s"
127 % (d.getVar('PN', True), pkg, ' '.join(candidates), var))
128
129 ml = d.getVar('MLPREFIX', True)
130 if not ml:
131 return
132
133 packages = d.getVar('PACKAGES', True)
134 for pkg in packages.split():
135 check_mlprefix(pkg, 'RDEPENDS', ml)
136 check_mlprefix(pkg, 'RPROVIDES', ml)
137 check_mlprefix(pkg, 'RRECOMMENDS', ml)
138 check_mlprefix(pkg, 'RSUGGESTS', ml)
139 check_mlprefix(pkg, 'RREPLACES', ml)
140 check_mlprefix(pkg, 'RCONFLICTS', ml)
141}
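As a concrete illustration of check_mlprefix(): with MLPREFIX = "lib32-", a dependency list "lib32-glibc libpng kernel-module-foo" flags only "libpng", since the multilib prefix and the kernel-module namespace are both allowed. The core of that filter, reduced to standalone Python with hard-coded values:

    deps = "lib32-glibc libpng kernel-module-foo".split()
    mlprefix = "lib32-"
    suspicious = [i for i in deps
                  if not i.startswith(mlprefix)
                  and not i.startswith("kernel-module")]
    print(suspicious)   # ['libpng']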
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
new file mode 100644
index 0000000000..3315ba9327
--- /dev/null
+++ b/meta/classes/multilib_global.bbclass
@@ -0,0 +1,47 @@
1python multilib_virtclass_handler_global () {
2 if not e.data:
3 return
4
5 if isinstance(e, bb.event.RecipePreFinalise):
6 for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
7 if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
8 e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
9
10 variant = e.data.getVar("BBEXTENDVARIANT", True)
11
12 if isinstance(e, bb.event.RecipeParsed) and not variant:
13 if bb.data.inherits_class('kernel', e.data) or \
14 bb.data.inherits_class('module-base', e.data) or \
15 (bb.data.inherits_class('allarch', e.data) and\
16 not bb.data.inherits_class('packagegroup', e.data)):
17 variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
18
19 import oe.classextend
20 clsextends = []
21 for variant in variants:
22 clsextends.append(oe.classextend.ClassExtender(variant, e.data))
23
24 # Process PROVIDES
25 origprovs = provs = e.data.getVar("PROVIDES", True) or ""
26 for clsextend in clsextends:
27 provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
28 e.data.setVar("PROVIDES", provs)
29
30 # Process RPROVIDES
31 origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
32 for clsextend in clsextends:
33 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
34 e.data.setVar("RPROVIDES", rprovs)
35
36 # Process RPROVIDES_${PN}...
37 for pkg in (e.data.getVar("PACKAGES", True) or "").split():
38 origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
39 for clsextend in clsextends:
40 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
41 rprovs = rprovs + " " + clsextend.extname + "-" + pkg
42 e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
43}
44
45addhandler multilib_virtclass_handler_global
46multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed"
47
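The effect of the RPROVIDES loops is easiest to see on a concrete value. For a package "foo" with variants "lib32 lib64", RPROVIDES_foo grows from, say, "foo-data" to also carry the variant-mapped names plus "lib32-foo lib64-foo". Approximating ClassExtender's mapping with a plain prefix:

    pkg, rprovs = "foo", "foo-data"
    for variant in ["lib32", "lib64"]:
        mapped = " ".join(variant + "-" + p for p in "foo-data".split())
        rprovs = rprovs + " " + mapped + " " + variant + "-" + pkg
    print(rprovs)
    # foo-data lib32-foo-data lib32-foo lib64-foo-data lib64-foo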
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
new file mode 100644
index 0000000000..4d049a82e6
--- /dev/null
+++ b/meta/classes/multilib_header.bbclass
@@ -0,0 +1,47 @@
1inherit siteinfo
2
3# If applicable on the architecture, this routine will rename the header and
4# add a unique identifier to the name for the ABI/bitsize that is being used.
5# A wrapper will be generated for the architecture that knows how to call
6# all of the ABI variants for that given architecture.
7#
8oe_multilib_header() {
9 # Pick the ABI/bitsize identifier appended to the header name:
10 # For ARM: We don't support multilib builds.
11 # For MIPS: "n32" is a special case, which needs to be
12 # distinct from both 64-bit and 32-bit.
13 case ${TARGET_ARCH} in
14 arm*) return
15 ;;
16 mips*) case "${MIPSPKGSFX_ABI}" in
17 "-n32")
18 ident=n32
19 ;;
20 *)
21 ident=${SITEINFO_BITS}
22 ;;
23 esac
24 ;;
25 *) ident=${SITEINFO_BITS}
26 esac
27 if echo ${TARGET_ARCH} | grep -q arm; then
28 return
29 fi
30 for each_header in "$@" ; do
31 if [ ! -f "${D}/${includedir}/$each_header" ]; then
32 bberror "oe_multilib_header: Unable to find header $each_header."
33 continue
34 fi
35 stem=$(echo $each_header | sed 's#\.h$##')
36 # if mips64/n32 set ident to n32
37 mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
38
39 sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
40 done
41}
42
43# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
44# We don't need multilib headers for native builds so brute force things.
45oe_multilib_header_class-native () {
46 return
47}
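The case statement above is a small decision table; the same selection written in Python for clarity (argument values are illustrative):

    def header_ident(target_arch, mips_abi_suffix, siteinfo_bits):
        # ARM gets no multilib headers at all; MIPS n32 needs its own
        # identifier; everything else keys off the word size.
        if target_arch.startswith("arm"):
            return None            # caller skips the rename entirely
        if target_arch.startswith("mips") and mips_abi_suffix == "-n32":
            return "n32"
        return siteinfo_bits

    # header_ident("mips64", "-n32", "64") -> "n32"
    # header_ident("x86_64", "", "64")     -> "64"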
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
new file mode 100644
index 0000000000..5a318d21d9
--- /dev/null
+++ b/meta/classes/native.bbclass
@@ -0,0 +1,164 @@
1# We want native packages to be relocatable
2inherit relocatable
3
4# Native packages are built indirectly via dependency,
5# no need for them to be a direct target of 'world'
6EXCLUDE_FROM_WORLD = "1"
7
8PACKAGES = ""
9PACKAGES_class-native = ""
10PACKAGES_DYNAMIC = ""
11PACKAGES_DYNAMIC_class-native = ""
12PACKAGE_ARCH = "${BUILD_ARCH}"
13
14# used by cmake class
15OECMAKE_RPATH = "${libdir}"
16OECMAKE_RPATH_class-native = "${libdir}"
17
18# When this class has packaging enabled, setting
19# RPROVIDES becomes unnecessary.
20RPROVIDES = "${PN}"
21
22TARGET_ARCH = "${BUILD_ARCH}"
23TARGET_OS = "${BUILD_OS}"
24TARGET_VENDOR = "${BUILD_VENDOR}"
25TARGET_PREFIX = "${BUILD_PREFIX}"
26TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
27TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
28TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
29TARGET_FPU = ""
30
31HOST_ARCH = "${BUILD_ARCH}"
32HOST_OS = "${BUILD_OS}"
33HOST_VENDOR = "${BUILD_VENDOR}"
34HOST_PREFIX = "${BUILD_PREFIX}"
35HOST_CC_ARCH = "${BUILD_CC_ARCH}"
36HOST_LD_ARCH = "${BUILD_LD_ARCH}"
37HOST_AS_ARCH = "${BUILD_AS_ARCH}"
38
39CPPFLAGS = "${BUILD_CPPFLAGS}"
40CFLAGS = "${BUILD_CFLAGS}"
41CXXFLAGS = "${BUILD_CFLAGS}"
42LDFLAGS = "${BUILD_LDFLAGS}"
43LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
44
45STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
46STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
47
48# native pkg doesn't need the TOOLCHAIN_OPTIONS.
49TOOLCHAIN_OPTIONS = ""
50
51DEPENDS_GETTEXT = "gettext-native"
52
53# Don't build ptest natively
54PTEST_ENABLED = "0"
55
56# Don't use site files for native builds
57export CONFIG_SITE = "${COREBASE}/meta/site/native"
58
59# set the compiler as well. It could have been set to something else
60export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
61export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
62export FC = "${CCACHE}${HOST_PREFIX}gfortran ${HOST_CC_ARCH}"
63export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
64export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
65export CCLD = "${CC}"
66export AR = "${HOST_PREFIX}ar"
67export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
68export RANLIB = "${HOST_PREFIX}ranlib"
69export STRIP = "${HOST_PREFIX}strip"
70
71# Path prefixes
72base_prefix = "${STAGING_DIR_NATIVE}"
73prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
74exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
75
76libdir = "${STAGING_DIR_NATIVE}${libdir_native}"
77
78baselib = "lib"
79
80# Libtool's default paths are correct for the native machine
81lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
82
83NATIVE_PACKAGE_PATH_SUFFIX ?= ""
84bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
85libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
86libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
87
88do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
89do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/"
90
91# Since we actually install these in situ there is no staging prefix
92STAGING_DIR_HOST = ""
93STAGING_DIR_TARGET = ""
94PKG_CONFIG_DIR = "${libdir}/pkgconfig"
95
96EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
97PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
98PKG_CONFIG_SYSROOT_DIR = ""
99
100# we don't want libc-uclibc or libc-glibc to kick in for native recipes
101LIBCOVERRIDE = ""
102CLASSOVERRIDE = "class-native"
103
104PATH_prepend = "${COREBASE}/scripts/native-intercept:"
105
106python native_virtclass_handler () {
107 classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
108 if "native" not in classextend:
109 return
110
111 pn = e.data.getVar("PN", True)
112 if not pn.endswith("-native"):
113 return
114
115 def map_dependencies(varname, d, suffix = ""):
116 if suffix:
117 varname = varname + "_" + suffix
118 deps = d.getVar(varname, True)
119 if not deps:
120 return
121 deps = bb.utils.explode_deps(deps)
122 newdeps = []
123 for dep in deps:
124 if dep.endswith("-cross"):
125 newdeps.append(dep.replace("-cross", "-native"))
126 elif not dep.endswith("-native"):
127 newdeps.append(dep + "-native")
128 else:
129 newdeps.append(dep)
130 d.setVar(varname, " ".join(newdeps))
131
132 map_dependencies("DEPENDS", e.data)
133 for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
134 map_dependencies("RDEPENDS", e.data, pkg)
135 map_dependencies("RRECOMMENDS", e.data, pkg)
136 map_dependencies("RSUGGESTS", e.data, pkg)
137 map_dependencies("RPROVIDES", e.data, pkg)
138 map_dependencies("RREPLACES", e.data, pkg)
139
140 provides = e.data.getVar("PROVIDES", True)
141 for prov in provides.split():
142 if prov.find(pn) != -1:
143 continue
144 if not prov.endswith("-native"):
145 provides = provides.replace(prov, prov + "-native")
146 e.data.setVar("PROVIDES", provides)
147
148 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
149}
150
151addhandler native_virtclass_handler
152native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
153
154deltask package
155deltask packagedata
156deltask package_write_ipk
157deltask package_write_deb
158deltask package_write_rpm
159deltask package_write
160
161do_packagedata[stamp-extra-info] = ""
162do_populate_sysroot[stamp-extra-info] = ""
163
164USE_NLS = "no"
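A few concrete mappings produced by the rules in map_dependencies() above (inputs are illustrative):

    def to_native(dep):
        # -cross tools become -native; unsuffixed names gain -native;
        # names already ending in -native pass through unchanged.
        if dep.endswith("-cross"):
            return dep.replace("-cross", "-native")
        elif not dep.endswith("-native"):
            return dep + "-native"
        return dep

    print([to_native(x) for x in ["zlib", "gcc-cross", "quilt-native"]])
    # ['zlib-native', 'gcc-native', 'quilt-native']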
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
new file mode 100644
index 0000000000..7f94258bf1
--- /dev/null
+++ b/meta/classes/nativesdk.bbclass
@@ -0,0 +1,94 @@
1# SDK packages are built either explicitly by the user,
2# or indirectly via dependency. No need to be in 'world'.
3EXCLUDE_FROM_WORLD = "1"
4
5STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
6
7# libc for the SDK can be different to that of the target
8NATIVESDKLIBC ?= "libc-glibc"
9LIBCOVERRIDE = ":${NATIVESDKLIBC}"
10CLASSOVERRIDE = "class-nativesdk"
11
12#
13# Update PACKAGE_ARCH and PACKAGE_ARCHS
14#
15PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
16PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
17
18#
19# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
20# binaries
21#
22DEPENDS_append = " chrpath-replacement-native"
23EXTRANATIVEPATH += "chrpath-native"
24
25STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
26STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
27
28HOST_ARCH = "${SDK_ARCH}"
29HOST_VENDOR = "${SDK_VENDOR}"
30HOST_OS = "${SDK_OS}"
31HOST_PREFIX = "${SDK_PREFIX}"
32HOST_CC_ARCH = "${SDK_CC_ARCH}"
33HOST_LD_ARCH = "${SDK_LD_ARCH}"
34HOST_AS_ARCH = "${SDK_AS_ARCH}"
35#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
36
37TARGET_ARCH = "${SDK_ARCH}"
38TARGET_VENDOR = "${SDK_VENDOR}"
39TARGET_OS = "${SDK_OS}"
40TARGET_PREFIX = "${SDK_PREFIX}"
41TARGET_CC_ARCH = "${SDK_CC_ARCH}"
42TARGET_LD_ARCH = "${SDK_LD_ARCH}"
43TARGET_AS_ARCH = "${SDK_AS_ARCH}"
44TARGET_FPU = ""
45EXTRA_OECONF_FPU = ""
46
47CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
48CFLAGS = "${BUILDSDK_CFLAGS}"
49CXXFLAGS = "${BUILDSDK_CFLAGS}"
50LDFLAGS = "${BUILDSDK_LDFLAGS}"
51
52# Change to place files in SDKPATH
53base_prefix = "${SDKPATHNATIVE}"
54prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
55exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
56baselib = "lib"
57
58export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
59export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
60
61python nativesdk_virtclass_handler () {
62 pn = e.data.getVar("PN", True)
63 if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
64 return
65
66 e.data.setVar("MLPREFIX", "nativesdk-")
67 e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", "").replace("nativesdk-", ""))
68 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
69}
70
71python () {
72 pn = d.getVar("PN", True)
73 if not pn.startswith("nativesdk-"):
74 return
75
76 import oe.classextend
77
78 clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
79 clsextend.rename_packages()
80 clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
81
82 clsextend.map_depends_variable("DEPENDS")
83 clsextend.map_packagevars()
84 clsextend.map_variable("PROVIDES")
85 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
86}
87
88addhandler nativesdk_virtclass_handler
89nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
90
91do_populate_sysroot[stamp-extra-info] = ""
92do_packagedata[stamp-extra-info] = ""
93
94USE_NLS = "${SDKUSE_NLS}"
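The PN rewrite in nativesdk_virtclass_handler normalises both accepted naming styles onto the nativesdk- prefix; in isolation:

    def normalise_pn(pn):
        # "foo-nativesdk" and "nativesdk-foo" both become "nativesdk-foo"
        return "nativesdk-" + pn.replace("-nativesdk", "").replace("nativesdk-", "")

    print(normalise_pn("foo-nativesdk"))   # nativesdk-foo
    print(normalise_pn("nativesdk-foo"))   # nativesdk-foo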
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
new file mode 100644
index 0000000000..f2e7540dcf
--- /dev/null
+++ b/meta/classes/oelint.bbclass
@@ -0,0 +1,174 @@
1addtask lint before do_fetch
2do_lint[nostamp] = "1"
3python do_lint() {
4 def testVar(var, explain=None):
5 try:
6 s = d[var]
7 return s["content"]
8 except KeyError:
9 bb.error("%s is not set" % var)
10 if explain: bb.note(explain)
11 return None
12
13
14 ##############################
15 # Test that DESCRIPTION exists
16 #
17 testVar("DESCRIPTION")
18
19
20 ##############################
21 # Test that HOMEPAGE exists
22 #
23 s = testVar("HOMEPAGE")
24 if s=="unknown":
25 bb.error("HOMEPAGE is not set")
26 elif not s.startswith("http://"):
27 bb.error("HOMEPAGE doesn't start with http://")
28
29
30
31 ##############################
32 # Test for valid LICENSE
33 #
34 valid_licenses = {
35 "GPL-2" : "GPLv2",
36 "GPL LGPL FDL" : True,
37 "GPL PSF" : True,
38 "GPL/QPL" : True,
39 "GPL" : True,
40 "GPLv2" : True,
41 "IBM" : True,
42 "LGPL GPL" : True,
43 "LGPL" : True,
44 "MIT" : True,
45 "OSL" : True,
46 "Perl" : True,
47 "Public Domain" : True,
48 "QPL" : "GPL/QPL",
49 }
50 s = testVar("LICENSE")
51 if s=="unknown":
52 bb.error("LICENSE is not set")
53 elif s.startswith("Vendor"):
54 pass
55 else:
56 try:
57 newlic = valid_licenses[s]
58 if newlic == False:
59 bb.note("LICENSE '%s' is not recommended" % s)
60 elif newlic != True:
61 bb.note("LICENSE '%s' is not recommended, better use '%s'" % (s, newlic))
62 except KeyError:
63 bb.note("LICENSE '%s' is not recommended" % s)
64
65
66 ##############################
67 # Test for valid MAINTAINER
68 #
69 s = testVar("MAINTAINER")
70 if s=="OpenEmbedded Team <openembedded-devel@openembedded.org>":
71 bb.error("explicit MAINTAINER is missing, using default")
72 elif s and s.find("@") == -1:
73 bb.error("You forgot to put an e-mail address into MAINTAINER")
74
75
76 ##############################
77 # Test for valid SECTION
78 #
79 # if Correct section: True section name is valid
80 # False section name is invalid, no suggestion
81 # string section name is invalid, better name suggested
82 #
83 valid_sections = {
84 # Current Section Correct section
85 "apps" : True,
86 "audio" : True,
87 "base" : True,
88 "console/games" : True,
89 "console/net" : "console/network",
90 "console/network" : True,
91 "console/utils" : True,
92 "devel" : True,
93 "developing" : "devel",
94 "devel/python" : True,
95 "fonts" : True,
96 "games" : True,
97 "games/libs" : True,
98 "gnome/base" : True,
99 "gnome/libs" : True,
100 "gpe" : True,
101 "gpe/libs" : True,
102 "gui" : False,
103 "libc" : "libs",
104 "libs" : True,
105 "libs/net" : True,
106 "multimedia" : True,
107 "net" : "network",
108 "NET" : "network",
109 "network" : True,
110 "opie/applets" : True,
111 "opie/applications" : True,
112 "opie/base" : True,
113 "opie/codecs" : True,
114 "opie/decorations" : True,
115 "opie/fontfactories" : True,
116 "opie/fonts" : True,
117 "opie/games" : True,
118 "opie/help" : True,
119 "opie/inputmethods" : True,
120 "opie/libs" : True,
121 "opie/multimedia" : True,
122 "opie/pim" : True,
123 "opie/setting" : "opie/settings",
124 "opie/settings" : True,
125 "opie/Shell" : False,
126 "opie/styles" : True,
127 "opie/today" : True,
128 "scientific" : True,
129 "utils" : True,
130 "x11" : True,
131 "x11/libs" : True,
132 "x11/wm" : True,
133 }
134 s = testVar("SECTION")
135 if s:
136 try:
137 newsect = valid_sections[s]
138 if newsect == False:
139 bb.note("SECTION '%s' is not recommended" % s)
140 elif newsect != True:
141 bb.note("SECTION '%s' is not recommended, better use '%s'" % (s, newsect))
142 except KeyError:
143 bb.note("SECTION '%s' is not recommended" % s)
144
145 if not s.islower():
146 bb.error("SECTION should only use lower case")
147
148
149
150
151 ##############################
152 # Test for valid PRIORITY
153 #
154 valid_priorities = {
155 "standard" : True,
156 "required" : True,
157 "optional" : True,
158 "extra" : True,
159 }
160 s = testVar("PRIORITY")
161 if s:
162 try:
163 newprio = valid_priorities[s]
164 if newprio == False:
165 bb.note("PRIORITY '%s' is not recommended" % s)
166 elif newprio != True:
167 bb.note("PRIORITY '%s' is not recommended, better use '%s'" % (s, newprio))
168 except KeyError:
169 bb.note("PRIORITY '%s' is not recommended" % s)
170
171 if not s.islower():
172 bb.error("PRIORITY should only use lower case")
173
174}
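All three tables above follow one convention: True marks a valid value, False an invalid one with no suggestion, and a string supplies the recommended replacement. Factored out as it might appear inside the class (a sketch, where bb is available; oelint keeps the dispatch inline per table):

    def check_against_table(what, value, table):
        verdict = table.get(value)
        if verdict is True:
            return                  # value is fine
        if verdict is None or verdict is False:
            bb.note("%s '%s' is not recommended" % (what, value))
        else:
            bb.note("%s '%s' is not recommended, better use '%s'" % (what, value, verdict))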
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
new file mode 100644
index 0000000000..8a6feaf4d5
--- /dev/null
+++ b/meta/classes/own-mirrors.bbclass
@@ -0,0 +1,12 @@
1PREMIRRORS() {
2cvs://.*/.* ${SOURCE_MIRROR_URL}
3svn://.*/.* ${SOURCE_MIRROR_URL}
4git://.*/.* ${SOURCE_MIRROR_URL}
5hg://.*/.* ${SOURCE_MIRROR_URL}
6bzr://.*/.* ${SOURCE_MIRROR_URL}
7svk://.*/.* ${SOURCE_MIRROR_URL}
8p4://.*/.* ${SOURCE_MIRROR_URL}
9osc://.*/.* ${SOURCE_MIRROR_URL}
10https?$://.*/.* ${SOURCE_MIRROR_URL}
11ftp://.*/.* ${SOURCE_MIRROR_URL}
12}
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
new file mode 100644
index 0000000000..ea7591855e
--- /dev/null
+++ b/meta/classes/package.bbclass
@@ -0,0 +1,2019 @@
1#
2# Packaging process
3#
4# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
5# taking D and splitting it up into the packages listed in PACKAGES, placing the
6# resulting output in PKGDEST.
7#
8# There are the following default steps but PACKAGEFUNCS can be extended:
9#
10# a) package_get_auto_pr - get PRAUTO from remote PR service
11#
12# b) perform_packagecopy - Copy D into PKGD
13#
14# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
15#
16# d) split_and_strip_files - split the files into runtime and debug and strip them.
17# Debug files include debug info split, and associated sources that end up in -dbg packages
18#
19# e) fixup_perms - Fix up permissions in the package before we split it.
20#
21# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
22# Also triggers the binary stripping code to put files in -dbg packages.
23#
24# g) package_do_filedeps - Collect perfile run-time dependency metadata
25# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
26# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
27#
28# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
29# dependencies found. Also stores the package name so anyone else using this library
30# knows which package to depend on.
31#
32# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
33#
34# j) read_shlibdeps - Reads the stored shlibs information into the metadata
35#
36# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
37#
38# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
39# packaging steps
40
41inherit packagedata
42inherit prserv
43inherit chrpath
44
45# Need the package_qa_handle_error() in insane.bbclass
46inherit insane
47
48PKGD = "${WORKDIR}/package"
49PKGDEST = "${WORKDIR}/packages-split"
50
51LOCALE_SECTION ?= ''
52
53ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
54
55# rpm is used for the per-file dependency identification
56PACKAGE_DEPENDS += "rpm-native"
57
58def legitimize_package_name(s):
59 """
60 Make sure package names are legitimate strings
61 """
62 import re
63
64 def fixutf(m):
65 cp = m.group(1)
66 if cp:
67 return ('\u%s' % cp).decode('unicode_escape').encode('utf-8')
68
69 # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
70 s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
71
72 # Remaining package name validity fixes
73 return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
74
75def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
76 """
77 Used in .bb files to split up dynamically generated subpackages of a
78 given package, usually plugins or modules.
79
80 Arguments:
81 root -- the path in which to search
82 file_regex -- regular expression to match searched files. Use
83 parentheses () to mark the part of this expression
84 that should be used to derive the module name (to be
85 substituted where %s is used in other function
86 arguments as noted below)
87 output_pattern -- pattern to use for the package names. Must include %s.
88 description -- description to set for each package. Must include %s.
89 postinst -- postinstall script to use for all packages (as a
90 string)
91 recursive -- True to perform a recursive search - default False
92 hook -- a hook function to be called for every match. The
93 function will be called with the following arguments
94 (in the order listed):
95 f: full path to the file/directory match
96 pkg: the package name
97 file_regex: as above
98 output_pattern: as above
99 modulename: the module name derived using file_regex
100 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
101 all packages. The default value of None causes a
102 dependency on the main package (${PN}) - if you do
103 not want this, pass '' for this parameter.
104 aux_files_pattern -- extra item(s) to be added to FILES for each
105 package. Can be a single string item or a list of
106 strings for multiple items. Must include %s.
107 postrm -- postrm script to use for all packages (as a string)
108 allow_dirs -- True allow directories to be matched - default False
109 prepend -- if True, prepend created packages to PACKAGES instead
110 of the default False which appends them
111 match_path -- match file_regex on the whole relative path to the
112 root rather than just the file name
113 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
114 each package, using the actual derived module name
115 rather than converting it to something legal for a
116 package name. Can be a single string item or a list
117 of strings for multiple items. Must include %s.
118 allow_links -- True to allow symlinks to be matched - default False
119 summary -- Summary to set for each package. Must include %s;
120 defaults to description if not set.
121
122 """
123
124 dvar = d.getVar('PKGD', True)
125
126 # If the root directory doesn't exist, don't error out later but silently do
127 # no splitting.
128 if not os.path.exists(dvar + root):
129 return []
130
131 ml = d.getVar("MLPREFIX", True)
132 if ml:
133 if not output_pattern.startswith(ml):
134 output_pattern = ml + output_pattern
135
136 newdeps = []
137 for dep in (extra_depends or "").split():
138 if dep.startswith(ml):
139 newdeps.append(dep)
140 else:
141 newdeps.append(ml + dep)
142 if newdeps:
143 extra_depends = " ".join(newdeps)
144
145
146 packages = d.getVar('PACKAGES', True).split()
147 split_packages = []
148
149 if postinst:
150 postinst = '#!/bin/sh\n' + postinst + '\n'
151 if postrm:
152 postrm = '#!/bin/sh\n' + postrm + '\n'
153 if not recursive:
154 objs = os.listdir(dvar + root)
155 else:
156 objs = []
157 for walkroot, dirs, files in os.walk(dvar + root):
158 for file in files:
159 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
160 if relpath:
161 objs.append(relpath)
162
163 if extra_depends == None:
164 extra_depends = d.getVar("PN", True)
165
166 if not summary:
167 summary = description
168
169 for o in sorted(objs):
170 import re, stat
171 if match_path:
172 m = re.match(file_regex, o)
173 else:
174 m = re.match(file_regex, os.path.basename(o))
175
176 if not m:
177 continue
178 f = os.path.join(dvar + root, o)
179 mode = os.lstat(f).st_mode
180 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
181 continue
182 on = legitimize_package_name(m.group(1))
183 pkg = output_pattern % on
184 split_packages.append(pkg)
185 if not pkg in packages:
186 if prepend:
187 packages = [pkg] + packages
188 else:
189 packages.append(pkg)
190 oldfiles = d.getVar('FILES_' + pkg, True)
191 newfile = os.path.join(root, o)
192 # These names will be passed through glob() so if the filename actually
193 # contains * or ? (rare, but possible) we need to handle that specially
194 newfile = newfile.replace('*', '[*]')
195 newfile = newfile.replace('?', '[?]')
196 if not oldfiles:
197 the_files = [newfile]
198 if aux_files_pattern:
199 if type(aux_files_pattern) is list:
200 for fp in aux_files_pattern:
201 the_files.append(fp % on)
202 else:
203 the_files.append(aux_files_pattern % on)
204 if aux_files_pattern_verbatim:
205 if type(aux_files_pattern_verbatim) is list:
206 for fp in aux_files_pattern_verbatim:
207 the_files.append(fp % m.group(1))
208 else:
209 the_files.append(aux_files_pattern_verbatim % m.group(1))
210 d.setVar('FILES_' + pkg, " ".join(the_files))
211 if extra_depends != '':
212 d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
213 d.setVar('DESCRIPTION_' + pkg, description % on)
214 d.setVar('SUMMARY_' + pkg, summary % on)
215 if postinst:
216 d.setVar('pkg_postinst_' + pkg, postinst)
217 if postrm:
218 d.setVar('pkg_postrm_' + pkg, postrm)
219 else:
220 d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
221 if callable(hook):
222 hook(f, pkg, file_regex, output_pattern, m.group(1))
223
224 d.setVar('PACKAGES', ' '.join(packages))
225 return split_packages
226
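# An illustrative recipe-side call of do_split_packages(), splitting a plugin
# directory into per-plugin subpackages (the path and naming pattern below are
# hypothetical):
#
#   do_split_packages(d, d.expand('${libdir}/myapp/plugins'),
#                     '^lib(.*)\.so$', 'myapp-plugin-%s',
#                     'MyApp plugin for %s', extra_depends='')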
227PACKAGE_DEPENDS += "file-native"
228
229python () {
230 if d.getVar('PACKAGES', True) != '':
231 deps = ""
232 for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
233 deps += " %s:do_populate_sysroot" % dep
234 d.appendVarFlag('do_package', 'depends', deps)
235
236 # shlibs requires any DEPENDS to have already packaged for the *.list files
237 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
238}
239
240def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
241 # Function to split a single file into two components, one is the stripped
242 # target system binary, the other contains any debugging information. The
243 # two files are linked to reference each other.
244 #
245 # sourcefile is also generated containing a list of debugsources
246
247 import stat
248
249 dvar = d.getVar('PKGD', True)
250 objcopy = d.getVar("OBJCOPY", True)
251 debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
252 workdir = d.getVar("WORKDIR", True)
253 workparentdir = d.getVar("DEBUGSRC_OVERRIDE_PATH", True) or os.path.dirname(os.path.dirname(workdir))
254
255 # We ignore kernel modules, we don't generate debug info files.
256 if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
257 return 1
258
259 newmode = None
260 if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
261 origmode = os.stat(file)[stat.ST_MODE]
262 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
263 os.chmod(file, newmode)
264
265 # We need to extract the debug src information here...
266 if debugsrcdir:
267 cmd = "'%s' -b '%s' -d '%s' -i -l '%s' '%s'" % (debugedit, workparentdir, debugsrcdir, sourcefile, file)
268 (retval, output) = oe.utils.getstatusoutput(cmd)
269 if retval:
270 bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
271
272 bb.utils.mkdirhier(os.path.dirname(debugfile))
273
274 cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile)
275 (retval, output) = oe.utils.getstatusoutput(cmd)
276 if retval:
277 bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
278
279 # Set the debuglink to have the view of the file path on the target
280 cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file)
281 (retval, output) = oe.utils.getstatusoutput(cmd)
282 if retval:
283 bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
284
285 if newmode:
286 os.chmod(file, origmode)
287
288 return 0
289
290def copydebugsources(debugsrcdir, d):
291 # The debug src information written out to sourcefile is further processed
292 # and copied to the destination here.
293
294 import stat
295
296 sourcefile = d.expand("${WORKDIR}/debugsources.list")
297 if debugsrcdir and os.path.isfile(sourcefile):
298 dvar = d.getVar('PKGD', True)
299 strip = d.getVar("STRIP", True)
300 objcopy = d.getVar("OBJCOPY", True)
301 debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
302 workdir = d.getVar("WORKDIR", True)
303 workparentdir = os.path.dirname(os.path.dirname(workdir))
304 workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
305
306 nosuchdir = []
307 basepath = dvar
308 for p in debugsrcdir.split("/"):
309 basepath = basepath + "/" + p
310 if not cpath.exists(basepath):
311 nosuchdir.append(basepath)
312 bb.utils.mkdirhier(basepath)
313 cpath.updatecache(basepath)
314
315 processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
316 # We need to ignore files that are not actually ours
317 # we do this by only paying attention to items from this package
318 processdebugsrc += "fgrep -zw '%s' | "
319 processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
320
321 cmd = processdebugsrc % (sourcefile, workbasedir, workparentdir, dvar, debugsrcdir)
322 (retval, output) = oe.utils.getstatusoutput(cmd)
323 # Can "fail" if internal headers/transient sources are attempted
324 #if retval:
325 # bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd))
326
327 # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
328 # Work around this by manually finding and copying any symbolic links that made it through.
329 cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
330 (retval, output) = oe.utils.getstatusoutput(cmd)
331 if retval:
332 bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd))
333
334 # The copy by cpio may have resulted in some empty directories! Remove these
335 cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
336 (retval, output) = oe.utils.getstatusoutput(cmd)
337 if retval:
338 bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
339
340 # Also remove debugsrcdir if it's empty
341 for p in nosuchdir[::-1]:
342 if os.path.exists(p) and not os.listdir(p):
343 os.rmdir(p)
344
345#
346# Package data handling routines
347#
348
349def get_package_mapping (pkg, basepkg, d):
350 import oe.packagedata
351
352 data = oe.packagedata.read_subpkgdata(pkg, d)
353 key = "PKG_%s" % pkg
354
355 if key in data:
356 # Have to avoid undoing the write_extra_pkgs(global_variants...)
357 if bb.data.inherits_class('allarch', d) and data[key] == basepkg:
358 return pkg
359 return data[key]
360
361 return pkg
362
363def get_package_additional_metadata (pkg_type, d):
364 base_key = "PACKAGE_ADD_METADATA"
365 for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
366 if d.getVar(key) is None:
367 continue
368 d.setVarFlag(key, "type", "list")
369 if d.getVarFlag(key, "separator") is None:
370 d.setVarFlag(key, "separator", "\\n")
371 metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
372 return "\n".join(metadata_fields).strip()
373
374def runtime_mapping_rename (varname, pkg, d):
375 #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
376
377 new_depends = {}
378 deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
379 for depend in deps:
380 new_depend = get_package_mapping(depend, pkg, d)
381 new_depends[new_depend] = deps[depend]
382
383 d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
384
385 #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
386
387#
388# Package functions suitable for inclusion in PACKAGEFUNCS
389#
390
391python package_get_auto_pr() {
392 # per recipe PRSERV_HOST
393 pn = d.getVar('PN', True)
394 host = d.getVar("PRSERV_HOST_" + pn, True)
395 if not (host is None):
396 d.setVar("PRSERV_HOST", host)
397
398 if d.getVar('PRSERV_HOST', True):
399 try:
400 auto_pr=prserv_get_pr_auto(d)
401 except Exception as e:
402 bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
403 if auto_pr is None:
404 if d.getVar('PRSERV_LOCKDOWN', True):
405 bb.fatal("Can NOT get PRAUTO from lockdown exported file")
406 else:
407 bb.fatal("Can NOT get PRAUTO from remote PR service")
408 return
409 d.setVar('PRAUTO',str(auto_pr))
410 else:
411 pkgv = d.getVar("PKGV", True)
412 if 'AUTOINC' in pkgv:
413 d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
414}
415
416LOCALEBASEPN ??= "${PN}"
417
418python package_do_split_locales() {
419 if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'):
420 bb.debug(1, "package requested not splitting locales")
421 return
422
423 packages = (d.getVar('PACKAGES', True) or "").split()
424
425 datadir = d.getVar('datadir', True)
426 if not datadir:
427 bb.note("datadir not defined")
428 return
429
430 dvar = d.getVar('PKGD', True)
431 pn = d.getVar('LOCALEBASEPN', True)
432
433 if pn + '-locale' in packages:
434 packages.remove(pn + '-locale')
435
436 localedir = os.path.join(dvar + datadir, 'locale')
437
438 if not cpath.isdir(localedir):
439 bb.debug(1, "No locale files in this package")
440 return
441
442 locales = os.listdir(localedir)
443
444 summary = d.getVar('SUMMARY', True) or pn
445 description = d.getVar('DESCRIPTION', True) or ""
446 locale_section = d.getVar('LOCALE_SECTION', True)
447 mlprefix = d.getVar('MLPREFIX', True) or ""
448 for l in sorted(locales):
449 ln = legitimize_package_name(l)
450 pkg = pn + '-locale-' + ln
451 packages.append(pkg)
452 d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
453 d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
454 d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
455 d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
456 d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
457 if locale_section:
458 d.setVar('SECTION_' + pkg, locale_section)
459
460 d.setVar('PACKAGES', ' '.join(packages))
461
462 # Disabled by RP 18/06/07
463 # Wildcards aren't supported in debian
464 # They break with ipkg since glibc-locale* will mean that
465 # glibc-localedata-translit* won't install as a dependency
466 # for some other package which breaks meta-toolchain
467 # Probably breaks since virtual-locale- isn't provided anywhere
468 #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split()
469 #rdep.append('%s-locale*' % pn)
470 #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
471}
472
473python perform_packagecopy () {
474 dest = d.getVar('D', True)
475 dvar = d.getVar('PKGD', True)
476
477 # Start package population by taking a copy of the installed
478 # files to operate on
479 # Preserve sparse files and hard links
480 cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (dest, dvar)
481 (retval, output) = oe.utils.getstatusoutput(cmd)
482 if retval:
483 bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
484
485 # replace RPATHs for the nativesdk binaries, to make them relocatable
486 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
487 rpath_replace (dvar, d)
488}
489perform_packagecopy[cleandirs] = "${PKGD}"
490perform_packagecopy[dirs] = "${PKGD}"
491
492# We generate a master list of directories to process; we start by
493# seeding this list with reasonable defaults, then load from
494# the fs-perms.txt files
495python fixup_perms () {
496 import pwd, grp
497
498 # init using a string with the same format as a line as documented in
499 # the fs-perms.txt file
500 # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
501 # <path> link <link target>
502 #
503 # __str__ can be used to print out an entry in the input format
504 #
505 # if fs_perms_entry.path is None:
506 # an error occurred
507 # if fs_perms_entry.link, you can retrieve:
508 # fs_perms_entry.path = path
509 # fs_perms_entry.link = target of link
510 # if not fs_perms_entry.link, you can retrieve:
511 # fs_perms_entry.path = path
512 # fs_perms_entry.mode = expected dir mode or None
513 # fs_perms_entry.uid = expected uid or -1
514 # fs_perms_entry.gid = expected gid or -1
515 # fs_perms_entry.walk = 'true' or something else
516 # fs_perms_entry.fmode = expected file mode or None
517 # fs_perms_entry.fuid = expected file uid or -1
518 # fs_perms_entry.fgid = expected file gid or -1
519 class fs_perms_entry():
520 def __init__(self, line):
521 lsplit = line.split()
522 if len(lsplit) == 3 and lsplit[1].lower() == "link":
523 self._setlink(lsplit[0], lsplit[2])
524 elif len(lsplit) == 8:
525 self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
526 else:
527 msg = "Fixup Perms: invalid config line %s" % line
528 package_qa_handle_error("perm-config", msg, d)
529 self.path = None
530 self.link = None
531
532 def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
533 self.path = os.path.normpath(path)
534 self.link = None
535 self.mode = self._procmode(mode)
536 self.uid = self._procuid(uid)
537 self.gid = self._procgid(gid)
538 self.walk = walk.lower()
539 self.fmode = self._procmode(fmode)
540 self.fuid = self._procuid(fuid)
541 self.fgid = self._procgid(fgid)
542
543 def _setlink(self, path, link):
544 self.path = os.path.normpath(path)
545 self.link = link
546
547 def _procmode(self, mode):
548 if not mode or mode == "-":
549 return None
550 else:
551 return int(mode,8)
552
553 # Note uid/gid -1 has special significance in os.lchown
554 def _procuid(self, uid):
555 if uid is None or uid == "-":
556 return -1
557 elif uid.isdigit():
558 return int(uid)
559 else:
560 return pwd.getpwnam(uid).pw_uid
561
562 def _procgid(self, gid):
563 if gid is None or gid == "-":
564 return -1
565 elif gid.isdigit():
566 return int(gid)
567 else:
568 return grp.getgrnam(gid).gr_gid
569
570 # Use for debugging the entries
571 def __str__(self):
572 if self.link:
573 return "%s link %s" % (self.path, self.link)
574 else:
575 mode = "-"
576 if self.mode:
577 mode = "0%o" % self.mode
578 fmode = "-"
579 if self.fmode:
580 fmode = "0%o" % self.fmode
581 uid = self._mapugid(self.uid)
582 gid = self._mapugid(self.gid)
583 fuid = self._mapugid(self.fuid)
584 fgid = self._mapugid(self.fgid)
585 return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
586
587 def _mapugid(self, id):
588 if id is None or id == -1:
589 return "-"
590 else:
591 return "%d" % id
592
593 # Fix the permission, owner and group of path
594 def fix_perms(path, mode, uid, gid, dir):
595 if mode and not os.path.islink(path):
596 #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
597 os.chmod(path, mode)
598 # -1 is a special value that means don't change the uid/gid
599 # if they are BOTH -1, don't bother to lchown
600 if not (uid == -1 and gid == -1):
601 #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
602 os.lchown(path, uid, gid)
603
604 # Return a list of configuration files based on either the default
605 # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
606 # paths are resolved via BBPATH
607 def get_fs_perms_list(d):
608 str = ""
609 bbpath = d.getVar('BBPATH', True)
610 fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
611 if not fs_perms_tables:
612 fs_perms_tables = 'files/fs-perms.txt'
613 for conf_file in fs_perms_tables.split():
614 str += " %s" % bb.utils.which(bbpath, conf_file)
615 return str
616
617
618
619 dvar = d.getVar('PKGD', True)
620
621 fs_perms_table = {}
622
623 # By default all of the standard directories specified in
624 # bitbake.conf will get 0755 root:root.
625 target_path_vars = [ 'base_prefix',
626 'prefix',
627 'exec_prefix',
628 'base_bindir',
629 'base_sbindir',
630 'base_libdir',
631 'datadir',
632 'sysconfdir',
633 'servicedir',
634 'sharedstatedir',
635 'localstatedir',
636 'infodir',
637 'mandir',
638 'docdir',
639 'bindir',
640 'sbindir',
641 'libexecdir',
642 'libdir',
643 'includedir',
644 'oldincludedir' ]
645
646 for path in target_path_vars:
647 dir = d.getVar(path, True) or ""
648 if dir == "":
649 continue
650 fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
651
652 # Now we actually load from the configuration files
653 for conf in get_fs_perms_list(d).split():
654 if os.path.exists(conf):
655 f = open(conf)
656 for line in f:
657 if line.startswith('#'):
658 continue
659 lsplit = line.split()
660 if len(lsplit) == 0:
661 continue
662 if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
663 msg = "Fixup perms: %s invalid line: %s" % (conf, line)
664 package_qa_handle_error("perm-line", msg, d)
665 continue
666 entry = fs_perms_entry(d.expand(line))
667 if entry and entry.path:
668 fs_perms_table[entry.path] = entry
669 f.close()
670
671 # Debug -- list out in-memory table
672 #for dir in fs_perms_table:
673 # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
674
675 # We process links first, so we can go back and fixup directory ownership
676 # for any newly created directories
677 for dir in fs_perms_table:
678 if not fs_perms_table[dir].link:
679 continue
680
681 origin = dvar + dir
682 if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
683 continue
684
685 link = fs_perms_table[dir].link
686 if link[0] == "/":
687 target = dvar + link
688 ptarget = link
689 else:
690 target = os.path.join(os.path.dirname(origin), link)
691 ptarget = os.path.join(os.path.dirname(dir), link)
692 if os.path.exists(target):
693 msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
694 package_qa_handle_error("perm-link", msg, d)
695 continue
696
697 # Create path to move directory to, move it, and then setup the symlink
698 bb.utils.mkdirhier(os.path.dirname(target))
699 #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
700 os.rename(origin, target)
701 #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
702 os.symlink(link, origin)
703
704 for dir in fs_perms_table:
705 if fs_perms_table[dir].link:
706 continue
707
708 origin = dvar + dir
709 if not (cpath.exists(origin) and cpath.isdir(origin)):
710 continue
711
712 fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
713
714 if fs_perms_table[dir].walk == 'true':
715 for root, dirs, files in os.walk(origin):
716 for dr in dirs:
717 each_dir = os.path.join(root, dr)
718 fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
719 for f in files:
720 each_file = os.path.join(root, f)
721 fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
722}
723
724python split_and_strip_files () {
725 import stat, errno
726
727 dvar = d.getVar('PKGD', True)
728 pn = d.getVar('PN', True)
729
730 # We default to '.debug' style
731 if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
732 # Single debug-file-directory style debug info
733 debugappend = ".debug"
734 debugdir = ""
735 debuglibdir = "/usr/lib/debug"
736 debugsrcdir = "/usr/src/debug"
737 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src':
738 # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
739 debugappend = ""
740 debugdir = "/.debug"
741 debuglibdir = ""
742 debugsrcdir = ""
743 else:
744 # Original OE-core, a.k.a. ".debug", style debug info
745 debugappend = ""
746 debugdir = "/.debug"
747 debuglibdir = ""
748 debugsrcdir = "/usr/src/debug"
749
750 sourcefile = d.expand("${WORKDIR}/debugsources.list")
751 bb.utils.remove(sourcefile)
752
753 os.chdir(dvar)
754
755 # Return type (bits):
756 # 0 - not elf
757 # 1 - ELF
758 # 2 - stripped
759 # 4 - executable
760 # 8 - shared library
761 # 16 - kernel module
762 def isELF(path):
763 type = 0
764 ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
765
766 if ret:
767 msg = "split_and_strip_files: 'file %s' failed" % path
768 package_qa_handle_error("split-strip", msg, d)
769 return type
770
771            # ELF; the stripped bit (2) is set when file(1) does not report "not stripped"
772 if "ELF" in result:
773 type |= 1
774 if "not stripped" not in result:
775 type |= 2
776 if "executable" in result:
777 type |= 4
778 if "shared" in result:
779 type |= 8
780 return type
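    # For example, assuming typical file(1) output, a stripped shared
    # library would classify as 1|2|8 == 11 and a statically linked,
    # unstripped executable as 1|4 == 5.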
781
782
783 #
784    # First let's figure out all of the files we may have to process ... do this only once!
785 #
786 elffiles = {}
787 symlinks = {}
788 hardlinks = {}
789 kernmods = []
790 libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
791 baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
792 if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
793 for root, dirs, files in cpath.walk(dvar):
794 for f in files:
795 file = os.path.join(root, f)
796 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
797 kernmods.append(file)
798 continue
799
800 # Skip debug files
801 if debugappend and file.endswith(debugappend):
802 continue
803 if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
804 continue
805
806 try:
807 ltarget = cpath.realpath(file, dvar, False)
808 s = cpath.lstat(ltarget)
809 except OSError as e:
810 (err, strerror) = e.args
811 if err != errno.ENOENT:
812 raise
813 # Skip broken symlinks
814 continue
815 if not s:
816 continue
817                # Check it's an executable
818 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
819 or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
820 # If it's a symlink, and points to an ELF file, we capture the readlink target
821 if cpath.islink(file):
822 target = os.readlink(file)
823 if isELF(ltarget):
824 #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
825 symlinks[file] = target
826 continue
827 # It's a file (or hardlink), not a link
828 # ...but is it ELF, and is it already stripped?
829 elf_file = isELF(file)
830 if elf_file & 1:
831 if elf_file & 2:
832 if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
833 bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
834 else:
835 msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
836 package_qa_handle_error("already-stripped", msg, d)
837 continue
838 # Check if it's a hard link to something else
839 if s.st_nlink > 1:
840 file_reference = "%d_%d" % (s.st_dev, s.st_ino)
841 # Hard link to something else
842 hardlinks[file] = file_reference
843 continue
844 elffiles[file] = elf_file
845
846 #
847    # First let's process debug splitting
848 #
849 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
850 hardlinkmap = {}
851 # For hardlinks, process only one of the files
852 for file in hardlinks:
853 file_reference = hardlinks[file]
854 if file_reference not in hardlinkmap:
855 # If this is a new file, add it as a reference, and
856                    # update its type, so we can fall through and split
857 elffiles[file] = isELF(file)
858 hardlinkmap[file_reference] = file
859
860 for file in elffiles:
861 src = file[len(dvar):]
862 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
863 fpath = dvar + dest
864
865 # Split the file...
866 bb.utils.mkdirhier(os.path.dirname(fpath))
867 #bb.note("Split %s -> %s" % (file, fpath))
868 # Only store off the hard link reference if we successfully split!
869 splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
870
871 # Hardlink our debug symbols to the other hardlink copies
872 for file in hardlinks:
873 if file not in elffiles:
874 src = file[len(dvar):]
875 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
876 fpath = dvar + dest
877 file_reference = hardlinks[file]
878 target = hardlinkmap[file_reference][len(dvar):]
879 ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
880 bb.utils.mkdirhier(os.path.dirname(fpath))
881 #bb.note("Link %s -> %s" % (fpath, ftarget))
882 os.link(ftarget, fpath)
883
884 # Create symlinks for all cases we were able to split symbols
885 for file in symlinks:
886 src = file[len(dvar):]
887 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
888 fpath = dvar + dest
889 # Skip it if the target doesn't exist
890 try:
891 s = os.stat(fpath)
892 except OSError as e:
893 (err, strerror) = e.args
894 if err != errno.ENOENT:
895 raise
896 continue
897
898 ltarget = symlinks[file]
899 lpath = os.path.dirname(ltarget)
900 lbase = os.path.basename(ltarget)
901 ftarget = ""
902 if lpath and lpath != ".":
903 ftarget += lpath + debugdir + "/"
904 ftarget += lbase + debugappend
905 if lpath.startswith(".."):
906 ftarget = os.path.join("..", ftarget)
907 bb.utils.mkdirhier(os.path.dirname(fpath))
908 #bb.note("Symlink %s -> %s" % (fpath, ftarget))
909 os.symlink(ftarget, fpath)
910
911 # Process the debugsrcdir if requested...
912 # This copies and places the referenced sources for later debugging...
913 copydebugsources(debugsrcdir, d)
914 #
915 # End of debug splitting
916 #
917
918 #
919    # Now let's go back over things and strip them
920 #
921 if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
922 strip = d.getVar("STRIP", True)
923 sfiles = []
924 for file in elffiles:
925 elf_file = int(elffiles[file])
926 #bb.note("Strip %s" % file)
927 sfiles.append((file, elf_file, strip))
928 for f in kernmods:
929 sfiles.append((f, 16, strip))
930
931
932 import multiprocessing
933 nproc = multiprocessing.cpu_count()
934 pool = bb.utils.multiprocessingpool(nproc)
935 processed = list(pool.imap(oe.package.runstrip, sfiles))
936 pool.close()
937 pool.join()
938
939 #
940 # End of strip
941 #
942}
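# A minimal local.conf sketch selecting one of the three split styles
# handled above; leaving the variable unset (or set to any other value)
# selects the default ".debug" style with sources in /usr/src/debug:
#   PACKAGE_DEBUG_SPLIT_STYLE = "debug-file-directory"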
943
944python populate_packages () {
945 import glob, re
946
947 workdir = d.getVar('WORKDIR', True)
948 outdir = d.getVar('DEPLOY_DIR', True)
949 dvar = d.getVar('PKGD', True)
950 packages = d.getVar('PACKAGES', True)
951 pn = d.getVar('PN', True)
952
953 bb.utils.mkdirhier(outdir)
954 os.chdir(dvar)
955
956 # Sanity check PACKAGES for duplicates and for LICENSE_EXCLUSION
957    # Sanity should be moved to sanity.bbclass once we have the infrastructure
958 package_list = []
959
960 for pkg in packages.split():
961 if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
962 msg = "%s has an incompatible license. Excluding from packaging." % pkg
963 package_qa_handle_error("incompatible-license", msg, d)
964 if pkg in package_list:
965 msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
966 package_qa_handle_error("packages-list", msg, d)
967 else:
968 package_list.append(pkg)
969 d.setVar('PACKAGES', ' '.join(package_list))
970 pkgdest = d.getVar('PKGDEST', True)
971
972 seen = []
973
974 # os.mkdir masks the permissions with umask so we have to unset it first
975 oldumask = os.umask(0)
976
977 for pkg in package_list:
978 root = os.path.join(pkgdest, pkg)
979 bb.utils.mkdirhier(root)
980
981 filesvar = d.getVar('FILES_%s' % pkg, True) or ""
982 if "//" in filesvar:
983 msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
984 package_qa_handle_error("files-invalid", msg, d)
985            filesvar = filesvar.replace("//", "/")
986
987 origfiles = filesvar.split()
988 files = []
989 for file in origfiles:
990 if os.path.isabs(file):
991 file = '.' + file
992 if not file.startswith("./"):
993 file = './' + file
994 globbed = glob.glob(file)
995 if globbed:
996 if [ file ] != globbed:
997 files += globbed
998 continue
999 files.append(file)
1000
1001 for file in files:
1002 if not cpath.islink(file):
1003 if cpath.isdir(file):
1004 newfiles = [ os.path.join(file,x) for x in os.listdir(file) ]
1005 if newfiles:
1006 files += newfiles
1007 continue
1008 if (not cpath.islink(file)) and (not cpath.exists(file)):
1009 continue
1010 if file in seen:
1011 continue
1012 seen.append(file)
1013
1014 if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
1015 continue
1016
1017 def mkdir(src, dest, p):
1018 src = os.path.join(src, p)
1019 dest = os.path.join(dest, p)
1020 fstat = cpath.stat(src)
1021 os.mkdir(dest, fstat.st_mode)
1022 os.chown(dest, fstat.st_uid, fstat.st_gid)
1023 if p not in seen:
1024 seen.append(p)
1025 cpath.updatecache(dest)
1026
1027 def mkdir_recurse(src, dest, paths):
1028 if cpath.exists(dest + '/' + paths):
1029 return
1030 while paths.startswith("./"):
1031 paths = paths[2:]
1032 p = "."
1033 for c in paths.split("/"):
1034 p = os.path.join(p, c)
1035 if not cpath.exists(os.path.join(dest, p)):
1036 mkdir(src, dest, p)
1037
1038 if cpath.isdir(file) and not cpath.islink(file):
1039 mkdir_recurse(dvar, root, file)
1040 continue
1041
1042 mkdir_recurse(dvar, root, os.path.dirname(file))
1043 fpath = os.path.join(root,file)
1044 if not cpath.islink(file):
1045 os.link(file, fpath)
1046 fstat = cpath.stat(file)
1047 os.chmod(fpath, fstat.st_mode)
1048 os.chown(fpath, fstat.st_uid, fstat.st_gid)
1049 continue
1050 ret = bb.utils.copyfile(file, fpath)
1051 if ret is False or ret == 0:
1052 raise bb.build.FuncFailed("File population failed")
1053
1054 os.umask(oldumask)
1055 os.chdir(workdir)
1056
1057 unshipped = []
1058 for root, dirs, files in cpath.walk(dvar):
1059 dir = root[len(dvar):]
1060 if not dir:
1061 dir = os.sep
1062 for f in (files + dirs):
1063 path = os.path.join(dir, f)
1064 if ('.' + path) not in seen:
1065 unshipped.append(path)
1066
1067 if unshipped != []:
1068 msg = pn + ": Files/directories were installed but not shipped"
1069 if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
1070 bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
1071 else:
1072 for f in unshipped:
1073 msg = msg + "\n " + f
1074 package_qa_handle_error("installed-vs-shipped", msg, d)
1075}
1076populate_packages[dirs] = "${D}"
1077
1078python package_fixsymlinks () {
1079 import errno
1080 pkgdest = d.getVar('PKGDEST', True)
1081 packages = d.getVar("PACKAGES").split()
1082
1083 dangling_links = {}
1084 pkg_files = {}
1085 for pkg in packages:
1086 dangling_links[pkg] = []
1087 pkg_files[pkg] = []
1088 inst_root = os.path.join(pkgdest, pkg)
1089 for path in pkgfiles[pkg]:
1090 rpath = path[len(inst_root):]
1091 pkg_files[pkg].append(rpath)
1092 rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
1093 if not cpath.lexists(rtarget):
1094 dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
1095
1096 newrdepends = {}
1097 for pkg in dangling_links:
1098 for l in dangling_links[pkg]:
1099 found = False
1100 bb.debug(1, "%s contains dangling link %s" % (pkg, l))
1101 for p in packages:
1102 if l in pkg_files[p]:
1103 found = True
1104 bb.debug(1, "target found in %s" % p)
1105 if p == pkg:
1106 break
1107 if pkg not in newrdepends:
1108 newrdepends[pkg] = []
1109 newrdepends[pkg].append(p)
1110 break
1111 if found == False:
1112 bb.note("%s contains dangling symlink to %s" % (pkg, l))
1113
1114 for pkg in newrdepends:
1115 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
1116 for p in newrdepends[pkg]:
1117 if p not in rdepends:
1118 rdepends[p] = []
1119 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1120}
1121
1122PKGDESTWORK = "${WORKDIR}/pkgdata"
1123
1124python emit_pkgdata() {
1125 from glob import glob
1126 import json
1127
1128 def write_if_exists(f, pkg, var):
1129 def encode(str):
1130 import codecs
1131 c = codecs.getencoder("string_escape")
1132 return c(str)[0]
1133
1134 val = d.getVar('%s_%s' % (var, pkg), True)
1135 if val:
1136 f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
1137 return
1138 val = d.getVar('%s' % (var), True)
1139 if val:
1140 f.write('%s: %s\n' % (var, encode(val)))
1141 return
1142
1143 def write_extra_pkgs(variants, pn, packages, pkgdatadir):
1144 for variant in variants:
1145 with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
1146 fd.write("PACKAGES: %s\n" % ' '.join(
1147 map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
1148
1149 def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
1150 for variant in variants:
1151 for pkg in packages.split():
1152 ml_pkg = "%s-%s" % (variant, pkg)
1153 subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
1154 with open(subdata_file, 'w') as fd:
1155 fd.write("PKG_%s: %s" % (ml_pkg, pkg))
1156
1157 packages = d.getVar('PACKAGES', True)
1158 pkgdest = d.getVar('PKGDEST', True)
1159 pkgdatadir = d.getVar('PKGDESTWORK', True)
1160
1161 # Take shared lock since we're only reading, not writing
1162 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
1163
1164 data_file = pkgdatadir + d.expand("/${PN}" )
1165 f = open(data_file, 'w')
1166 f.write("PACKAGES: %s\n" % packages)
1167 f.close()
1168
1169 pn = d.getVar('PN', True)
1170 global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()
1171 variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split()
1172
1173 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1174 write_extra_pkgs(variants, pn, packages, pkgdatadir)
1175
1176 if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
1177 write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
1178
1179 workdir = d.getVar('WORKDIR', True)
1180
1181 for pkg in packages.split():
1182 pkgval = d.getVar('PKG_%s' % pkg, True)
1183 if pkgval is None:
1184 pkgval = pkg
1185 d.setVar('PKG_%s' % pkg, pkg)
1186
1187 pkgdestpkg = os.path.join(pkgdest, pkg)
1188 files = {}
1189 total_size = 0
1190 for f in pkgfiles[pkg]:
1191 relpth = os.path.relpath(f, pkgdestpkg)
1192 fstat = os.lstat(f)
1193 total_size += fstat.st_size
1194 files[os.sep + relpth] = fstat.st_size
1195 d.setVar('FILES_INFO', json.dumps(files))
1196
1197 subdata_file = pkgdatadir + "/runtime/%s" % pkg
1198 sf = open(subdata_file, 'w')
1199 write_if_exists(sf, pkg, 'PN')
1200 write_if_exists(sf, pkg, 'PE')
1201 write_if_exists(sf, pkg, 'PV')
1202 write_if_exists(sf, pkg, 'PR')
1203 write_if_exists(sf, pkg, 'PKGE')
1204 write_if_exists(sf, pkg, 'PKGV')
1205 write_if_exists(sf, pkg, 'PKGR')
1206 write_if_exists(sf, pkg, 'LICENSE')
1207 write_if_exists(sf, pkg, 'DESCRIPTION')
1208 write_if_exists(sf, pkg, 'SUMMARY')
1209 write_if_exists(sf, pkg, 'RDEPENDS')
1210 write_if_exists(sf, pkg, 'RPROVIDES')
1211 write_if_exists(sf, pkg, 'RRECOMMENDS')
1212 write_if_exists(sf, pkg, 'RSUGGESTS')
1213 write_if_exists(sf, pkg, 'RREPLACES')
1214 write_if_exists(sf, pkg, 'RCONFLICTS')
1215 write_if_exists(sf, pkg, 'SECTION')
1216 write_if_exists(sf, pkg, 'PKG')
1217 write_if_exists(sf, pkg, 'ALLOW_EMPTY')
1218 write_if_exists(sf, pkg, 'FILES')
1219 write_if_exists(sf, pkg, 'pkg_postinst')
1220 write_if_exists(sf, pkg, 'pkg_postrm')
1221 write_if_exists(sf, pkg, 'pkg_preinst')
1222 write_if_exists(sf, pkg, 'pkg_prerm')
1223 write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
1224 write_if_exists(sf, pkg, 'FILES_INFO')
1225 for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
1226 write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
1227
1228 write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
1229 for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
1230 write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
1231
1232 sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
1233 sf.close()
1234
1235 # Symlinks needed for reverse lookups (from the final package name)
1236 subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
1237 oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
1238
1239 allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
1240 if not allow_empty:
1241 allow_empty = d.getVar('ALLOW_EMPTY', True)
1242 root = "%s/%s" % (pkgdest, pkg)
1243 os.chdir(root)
1244 g = glob('*')
1245 if g or allow_empty == "1":
1246 packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
1247 open(packagedfile, 'w').close()
1248
1249 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1250 write_extra_runtime_pkgs(variants, packages, pkgdatadir)
1251
1252 if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d):
1253 write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
1254
1255 bb.utils.unlockfile(lf)
1256}
1257emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse"
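# A sketch of a runtime pkgdata file emitted above for a hypothetical
# package "libfoo1" (field names follow the write_if_exists() calls,
# values are invented):
#   PN: libfoo
#   PKGV: 1.0
#   RDEPENDS_libfoo1: libc6
#   FILES_INFO: {"/usr/lib/libfoo.so.1": 40960}
#   PKGSIZE_libfoo1: 40960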
1258
1259ldconfig_postinst_fragment() {
1260if [ x"$D" = "x" ]; then
1261 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
1262fi
1263}
1264
1265RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
1266
1267# Collect per-file run-time dependency metadata
1268# Output:
1269# FILERPROVIDESFLIST_pkg - list of all files with provides
1270# FILERPROVIDES_filepath_pkg - per-file provides
1271#
1272# FILERDEPENDSFLIST_pkg - list of all files with deps
1273# FILERDEPENDS_filepath_pkg - per-file deps
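# For example, for a hypothetical package "foo" shipping /usr/lib/libfoo.so.1:
#   FILERPROVIDESFLIST_foo = "/usr/lib/libfoo.so.1"
#   FILERPROVIDES_/usr/lib/libfoo.so.1_foo = "libfoo.so.1"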
1274
1275python package_do_filedeps() {
1276 if d.getVar('SKIP_FILEDEPS', True) == '1':
1277 return
1278
1279 pkgdest = d.getVar('PKGDEST', True)
1280 packages = d.getVar('PACKAGES', True)
1281 rpmdeps = d.getVar('RPMDEPS', True)
1282
1283 def chunks(files, n):
1284 return [files[i:i+n] for i in range(0, len(files), n)]
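    # e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]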
1285
1286 pkglist = []
1287 for pkg in packages.split():
1288 if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1':
1289 continue
1290 if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
1291 continue
1292 for files in chunks(pkgfiles[pkg], 100):
1293 pkglist.append((pkg, files, rpmdeps, pkgdest))
1294
1295 import multiprocessing
1296 nproc = multiprocessing.cpu_count()
1297 pool = bb.utils.multiprocessingpool(nproc)
1298 processed = list(pool.imap(oe.package.filedeprunner, pkglist))
1299 pool.close()
1300 pool.join()
1301
1302 provides_files = {}
1303 requires_files = {}
1304
1305 for result in processed:
1306 (pkg, provides, requires) = result
1307
1308 if pkg not in provides_files:
1309 provides_files[pkg] = []
1310 if pkg not in requires_files:
1311 requires_files[pkg] = []
1312
1313 for file in provides:
1314 provides_files[pkg].append(file)
1315 key = "FILERPROVIDES_" + file + "_" + pkg
1316 d.setVar(key, " ".join(provides[file]))
1317
1318 for file in requires:
1319 requires_files[pkg].append(file)
1320 key = "FILERDEPENDS_" + file + "_" + pkg
1321 d.setVar(key, " ".join(requires[file]))
1322
1323 for pkg in requires_files:
1324 d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
1325 for pkg in provides_files:
1326 d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
1327}
1328
1329SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs"
1330SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs"
1331
1332python package_do_shlibs() {
1333 import re, pipes
1334
1335 exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
1336 if exclude_shlibs:
1337 bb.note("not generating shlibs")
1338 return
1339
1340 lib_re = re.compile("^.*\.so")
1341 libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True))
1342
1343 packages = d.getVar('PACKAGES', True)
1344 targetos = d.getVar('TARGET_OS', True)
1345
1346 workdir = d.getVar('WORKDIR', True)
1347
1348 ver = d.getVar('PKGV', True)
1349 if not ver:
1350 msg = "PKGV not defined"
1351 package_qa_handle_error("pkgv-undefined", msg, d)
1352 return
1353
1354 pkgdest = d.getVar('PKGDEST', True)
1355
1356 shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
1357 shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
1358
1359 # Take shared lock since we're only reading, not writing
1360 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
1361
1362 def read_shlib_providers():
1363 list_re = re.compile('^(.*)\.list$')
1364 # Go from least to most specific since the last one found wins
1365 for dir in reversed(shlibs_dirs):
1366 bb.debug(2, "Reading shlib providers in %s" % (dir))
1367 if not os.path.exists(dir):
1368 continue
1369 for file in os.listdir(dir):
1370 m = list_re.match(file)
1371 if m:
1372 dep_pkg = m.group(1)
1373 fd = open(os.path.join(dir, file))
1374 lines = fd.readlines()
1375 fd.close()
1376 ver_file = os.path.join(dir, dep_pkg + '.ver')
1377 lib_ver = None
1378 if os.path.exists(ver_file):
1379 fd = open(ver_file)
1380 lib_ver = fd.readline().rstrip()
1381 fd.close()
1382 for l in lines:
1383 shlib_provider[l.rstrip()] = (dep_pkg, lib_ver)
1384
1385 def linux_so(file):
1386 needs_ldconfig = False
1387 cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
1388 fd = os.popen(cmd)
1389 lines = fd.readlines()
1390 fd.close()
1391 for l in lines:
1392 m = re.match("\s+NEEDED\s+([^\s]*)", l)
1393 if m:
1394 if m.group(1) not in needed[pkg]:
1395 needed[pkg].append(m.group(1))
1396 if m.group(1) not in needed_from:
1397 needed_from[m.group(1)] = []
1398 needed_from[m.group(1)].append(file)
1399 m = re.match("\s+SONAME\s+([^\s]*)", l)
1400 if m:
1401 this_soname = m.group(1)
1402 if not this_soname in sonames:
1403                    # if the library is private (only used by this package) then do not register it as an shlib provider
1404 if not private_libs or this_soname not in private_libs:
1405 sonames.append(this_soname)
1406 if libdir_re.match(os.path.dirname(file)):
1407 needs_ldconfig = True
1408 if snap_symlinks and (os.path.basename(file) != this_soname):
1409 renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
1410 return needs_ldconfig
1411
1412 def darwin_so(file):
1413 if not os.path.exists(file):
1414 return
1415
1416 def get_combinations(base):
1417 #
1418 # Given a base library name, find all combinations of this split by "." and "-"
1419 #
1420 combos = []
1421 options = base.split(".")
1422 for i in range(1, len(options) + 1):
1423 combos.append(".".join(options[0:i]))
1424 options = base.split("-")
1425 for i in range(1, len(options) + 1):
1426 combos.append("-".join(options[0:i]))
1427 return combos
1428
1429 if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'):
1430 # Drop suffix
1431 name = os.path.basename(file).rsplit(".",1)[0]
1432 # Find all combinations
1433 combos = get_combinations(name)
1434 for combo in combos:
1435 if not combo in sonames:
1436 sonames.append(combo)
1437 if file.endswith('.dylib') or file.endswith('.so'):
1438 lafile = file.replace(os.path.join(pkgdest, pkg), d.getVar('PKGD', True))
1439 # Drop suffix
1440 lafile = lafile.rsplit(".",1)[0]
1441 lapath = os.path.dirname(lafile)
1442 lafile = os.path.basename(lafile)
1443 # Find all combinations
1444 combos = get_combinations(lafile)
1445 for combo in combos:
1446 if os.path.exists(lapath + '/' + combo + '.la'):
1447 break
1448 lafile = lapath + '/' + combo + '.la'
1449
1450 #bb.note("Foo2: %s" % lafile)
1451 #bb.note("Foo %s" % file)
1452 if os.path.exists(lafile):
1453 fd = open(lafile, 'r')
1454 lines = fd.readlines()
1455 fd.close()
1456 for l in lines:
1457 m = re.match("\s*dependency_libs=\s*'(.*)'", l)
1458 if m:
1459 deps = m.group(1).split(" ")
1460 for dep in deps:
1461 #bb.note("Trying %s for %s" % (dep, pkg))
1462 name = None
1463 if dep.endswith(".la"):
1464 name = os.path.basename(dep).replace(".la", "")
1465 elif dep.startswith("-l"):
1466 name = dep.replace("-l", "lib")
1467 if pkg not in needed:
1468 needed[pkg] = []
1469 if name and name not in needed[pkg]:
1470 needed[pkg].append(name)
1471 if name not in needed_from:
1472 needed_from[name] = []
1473 if lafile and lafile not in needed_from[name]:
1474 needed_from[name].append(lafile)
1475 #bb.note("Adding %s for %s" % (name, pkg))
1476
1477 if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
1478 snap_symlinks = True
1479 else:
1480 snap_symlinks = False
1481
1482 if (d.getVar('USE_LDCONFIG', True) or "1") == "1":
1483 use_ldconfig = True
1484 else:
1485 use_ldconfig = False
1486
1487 needed = {}
1488 needed_from = {}
1489 shlib_provider = {}
1490 read_shlib_providers()
1491
1492 for pkg in packages.split():
1493 private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
1494 private_libs = private_libs.split()
1495 needs_ldconfig = False
1496 bb.debug(2, "calculating shlib provides for %s" % pkg)
1497
1498 pkgver = d.getVar('PKGV_' + pkg, True)
1499 if not pkgver:
1500 pkgver = d.getVar('PV_' + pkg, True)
1501 if not pkgver:
1502 pkgver = ver
1503
1504 needed[pkg] = []
1505 sonames = list()
1506 renames = list()
1507 for file in pkgfiles[pkg]:
1508 soname = None
1509 if cpath.islink(file):
1510 continue
1511 if targetos == "darwin" or targetos == "darwin8":
1512 darwin_so(file)
1513 elif os.access(file, os.X_OK) or lib_re.match(file):
1514 ldconfig = linux_so(file)
1515 needs_ldconfig = needs_ldconfig or ldconfig
1516 for (old, new) in renames:
1517 bb.note("Renaming %s to %s" % (old, new))
1518 os.rename(old, new)
1519 pkgfiles[pkg].remove(old)
1520
1521 shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
1522 shver_file = os.path.join(shlibswork_dir, pkg + ".ver")
1523 if len(sonames):
1524 fd = open(shlibs_file, 'w')
1525 for s in sonames:
1526 if s in shlib_provider:
1527 (old_pkg, old_pkgver) = shlib_provider[s]
1528 if old_pkg != pkg:
1529 bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s, pkg, pkgver))
1530 bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s))
1531 fd.write(s + '\n')
1532 shlib_provider[s] = (pkg, pkgver)
1533 fd.close()
1534 fd = open(shver_file, 'w')
1535 fd.write(pkgver + '\n')
1536 fd.close()
1537 if needs_ldconfig and use_ldconfig:
1538 bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
1539 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
1540 if not postinst:
1541 postinst = '#!/bin/sh\n'
1542 postinst += d.getVar('ldconfig_postinst_fragment', True)
1543 d.setVar('pkg_postinst_%s' % pkg, postinst)
1544 bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
1545
1546 bb.utils.unlockfile(lf)
1547
1548 assumed_libs = d.getVar('ASSUME_SHLIBS', True)
1549 if assumed_libs:
1550 for e in assumed_libs.split():
1551 l, dep_pkg = e.split(":")
1552 lib_ver = None
1553 dep_pkg = dep_pkg.rsplit("_", 1)
1554 if len(dep_pkg) == 2:
1555 lib_ver = dep_pkg[1]
1556 dep_pkg = dep_pkg[0]
1557 shlib_provider[l] = (dep_pkg, lib_ver)
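    # Example (hypothetical values) matching the "<lib>:<dep_pkg>[_<ver>]"
    # form parsed above:
    #   ASSUME_SHLIBS = "libEGL.so.1:libegl libz.so.1:zlib_1.2"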
1558
1559 for pkg in packages.split():
1560 bb.debug(2, "calculating shlib requirements for %s" % pkg)
1561
1562 deps = list()
1563 for n in needed[pkg]:
1564            # If n is in private libraries, don't try to search for a provider for it.
1565            # This could cause a problem if some abc.bb provides a private
1566            # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on the system library libfoo.so.1,
1567            # but skipping it is still a better alternative than providing our own
1568            # version and then adding a runtime dependency on the same system library.
1569 if private_libs and n in private_libs:
1570 bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n))
1571 continue
1572 if n in shlib_provider.keys():
1573 (dep_pkg, ver_needed) = shlib_provider[n]
1574
1575 bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n, dep_pkg, needed_from[n]))
1576
1577 if dep_pkg == pkg:
1578 continue
1579
1580 if ver_needed:
1581 dep = "%s (>= %s)" % (dep_pkg, ver_needed)
1582 else:
1583 dep = dep_pkg
1584 if not dep in deps:
1585 deps.append(dep)
1586 else:
1587 bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n, needed_from[n]))
1588
1589 deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
1590 if os.path.exists(deps_file):
1591 os.remove(deps_file)
1592 if len(deps):
1593 fd = open(deps_file, 'w')
1594 for dep in deps:
1595 fd.write(dep + '\n')
1596 fd.close()
1597}
1598
1599python package_do_pkgconfig () {
1600 import re
1601
1602 packages = d.getVar('PACKAGES', True)
1603 workdir = d.getVar('WORKDIR', True)
1604 pkgdest = d.getVar('PKGDEST', True)
1605
1606 shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
1607 shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
1608
1609 pc_re = re.compile('(.*)\.pc$')
1610 var_re = re.compile('(.*)=(.*)')
1611 field_re = re.compile('(.*): (.*)')
1612
1613 pkgconfig_provided = {}
1614 pkgconfig_needed = {}
1615 for pkg in packages.split():
1616 pkgconfig_provided[pkg] = []
1617 pkgconfig_needed[pkg] = []
1618 for file in pkgfiles[pkg]:
1619 m = pc_re.match(file)
1620 if m:
1621 pd = bb.data.init()
1622 name = m.group(1)
1623 pkgconfig_provided[pkg].append(name)
1624 if not os.access(file, os.R_OK):
1625 continue
1626 f = open(file, 'r')
1627 lines = f.readlines()
1628 f.close()
1629 for l in lines:
1630 m = var_re.match(l)
1631 if m:
1632 name = m.group(1)
1633 val = m.group(2)
1634 pd.setVar(name, pd.expand(val))
1635 continue
1636 m = field_re.match(l)
1637 if m:
1638 hdr = m.group(1)
1639 exp = bb.data.expand(m.group(2), pd)
1640 if hdr == 'Requires':
1641 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
1642
1643 # Take shared lock since we're only reading, not writing
1644 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
1645
1646 for pkg in packages.split():
1647 pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
1648 if pkgconfig_provided[pkg] != []:
1649 f = open(pkgs_file, 'w')
1650 for p in pkgconfig_provided[pkg]:
1651 f.write('%s\n' % p)
1652 f.close()
1653
1654 # Go from least to most specific since the last one found wins
1655 for dir in reversed(shlibs_dirs):
1656 if not os.path.exists(dir):
1657 continue
1658 for file in os.listdir(dir):
1659 m = re.match('^(.*)\.pclist$', file)
1660 if m:
1661 pkg = m.group(1)
1662 fd = open(os.path.join(dir, file))
1663 lines = fd.readlines()
1664 fd.close()
1665 pkgconfig_provided[pkg] = []
1666 for l in lines:
1667 pkgconfig_provided[pkg].append(l.rstrip())
1668
1669 for pkg in packages.split():
1670 deps = []
1671 for n in pkgconfig_needed[pkg]:
1672 found = False
1673 for k in pkgconfig_provided.keys():
1674 if n in pkgconfig_provided[k]:
1675 if k != pkg and not (k in deps):
1676 deps.append(k)
1677 found = True
1678 if found == False:
1679 bb.note("couldn't find pkgconfig module '%s' in any package" % n)
1680 deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
1681 if len(deps):
1682 fd = open(deps_file, 'w')
1683 for dep in deps:
1684 fd.write(dep + '\n')
1685 fd.close()
1686
1687 bb.utils.unlockfile(lf)
1688}
1689
1690def read_libdep_files(d):
1691 pkglibdeps = {}
1692 packages = d.getVar('PACKAGES', True).split()
1693 for pkg in packages:
1694 pkglibdeps[pkg] = {}
1695 for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
1696 depsfile = d.expand("${PKGDEST}/" + pkg + extension)
1697 if os.access(depsfile, os.R_OK):
1698 fd = open(depsfile)
1699 lines = fd.readlines()
1700 fd.close()
1701 for l in lines:
1702                l = l.rstrip()
1703 deps = bb.utils.explode_dep_versions2(l)
1704 for dep in deps:
1705 if not dep in pkglibdeps[pkg]:
1706 pkglibdeps[pkg][dep] = deps[dep]
1707 return pkglibdeps
1708
1709python read_shlibdeps () {
1710 pkglibdeps = read_libdep_files(d)
1711
1712 packages = d.getVar('PACKAGES', True).split()
1713 for pkg in packages:
1714 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
1715 for dep in pkglibdeps[pkg]:
1716 # Add the dep if it's not already there, or if no comparison is set
1717 if dep not in rdepends:
1718 rdepends[dep] = []
1719 for v in pkglibdeps[pkg][dep]:
1720 if v not in rdepends[dep]:
1721 rdepends[dep].append(v)
1722 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1723}
1724
1725python package_depchains() {
1726 """
1727 For a given set of prefix and postfix modifiers, make those packages
1728 RRECOMMENDS on the corresponding packages for its RDEPENDS.
1729
1730 Example: If package A depends upon package B, and A's .bb emits an
1731 A-dev package, this would make A-dev Recommends: B-dev.
1732
1733 If only one of a given suffix is specified, it will take the RRECOMMENDS
1734 based on the RDEPENDS of *all* other packages. If more than one of a given
1735    suffix is specified, it will only use the RDEPENDS of the single parent
1736 package.
1737 """
1738
1739 packages = d.getVar('PACKAGES', True)
1740 postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
1741 prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
1742
1743 def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
1744
1745 #bb.note('depends for %s is %s' % (base, depends))
1746 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
1747
1748 for depend in depends:
1749 if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
1750 #bb.note("Skipping %s" % depend)
1751 continue
1752 if depend.endswith('-dev'):
1753 depend = depend[:-4]
1754 if depend.endswith('-dbg'):
1755 depend = depend[:-4]
1756 pkgname = getname(depend, suffix)
1757 #bb.note("Adding %s for %s" % (pkgname, depend))
1758 if pkgname not in rreclist and pkgname != pkg:
1759 rreclist[pkgname] = []
1760
1761 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
1762 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
1763
1764 def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
1765
1766 #bb.note('rdepends for %s is %s' % (base, rdepends))
1767 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
1768
1769 for depend in rdepends:
1770 if depend.find('virtual-locale-') != -1:
1771 #bb.note("Skipping %s" % depend)
1772 continue
1773 if depend.endswith('-dev'):
1774 depend = depend[:-4]
1775 if depend.endswith('-dbg'):
1776 depend = depend[:-4]
1777 pkgname = getname(depend, suffix)
1778 #bb.note("Adding %s for %s" % (pkgname, depend))
1779 if pkgname not in rreclist and pkgname != pkg:
1780 rreclist[pkgname] = []
1781
1782 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
1783 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
1784
1785 def add_dep(list, dep):
1786 if dep not in list:
1787 list.append(dep)
1788
1789 depends = []
1790 for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
1791 add_dep(depends, dep)
1792
1793 rdepends = []
1794 for pkg in packages.split():
1795 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
1796 add_dep(rdepends, dep)
1797
1798 #bb.note('rdepends is %s' % rdepends)
1799
1800 def post_getname(name, suffix):
1801 return '%s%s' % (name, suffix)
1802 def pre_getname(name, suffix):
1803 return '%s%s' % (suffix, name)
1804
1805 pkgs = {}
1806 for pkg in packages.split():
1807 for postfix in postfixes:
1808 if pkg.endswith(postfix):
1809 if not postfix in pkgs:
1810 pkgs[postfix] = {}
1811 pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
1812
1813 for prefix in prefixes:
1814 if pkg.startswith(prefix):
1815 if not prefix in pkgs:
1816 pkgs[prefix] = {}
1817 pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
1818
1819 if "-dbg" in pkgs:
1820 pkglibdeps = read_libdep_files(d)
1821 pkglibdeplist = []
1822 for pkg in pkglibdeps:
1823 for k in pkglibdeps[pkg]:
1824 add_dep(pkglibdeplist, k)
1825 # FIXME this should not look at PN once all task recipes inherit from task.bbclass
1826 dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (d.getVar('PN', True) or '').startswith('packagegroup-'))
1827
1828 for suffix in pkgs:
1829 for pkg in pkgs[suffix]:
1830 if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
1831 continue
1832 (base, func) = pkgs[suffix][pkg]
1833 if suffix == "-dev":
1834 pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
1835 elif suffix == "-dbg":
1836 if not dbgdefaultdeps:
1837 pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
1838 continue
1839 if len(pkgs[suffix]) == 1:
1840 pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
1841 else:
1842 rdeps = []
1843 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""):
1844 add_dep(rdeps, dep)
1845 pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
1846}
1847
1848# Since bitbake can't determine which variables are accessed during package
1849# iteration, we need to list them here:
1850PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES"
1851
1852def gen_packagevar(d):
1853 ret = []
1854 pkgs = (d.getVar("PACKAGES", True) or "").split()
1855 vars = (d.getVar("PACKAGEVARS", True) or "").split()
1856 for p in pkgs:
1857 for v in vars:
1858 ret.append(v + "_" + p)
1859
1860 # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
1861 # affected recipes.
1862 ret.append('LICENSE_EXCLUSION-%s' % p)
1863 return " ".join(ret)
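# For example, with PACKAGES = "foo foo-dev" this yields, for each package,
# one entry per PACKAGEVARS name ("FILES_foo", "RDEPENDS_foo", ...) plus a
# "LICENSE_EXCLUSION-foo" entry.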
1864
1865PACKAGE_PREPROCESS_FUNCS ?= ""
1866# Functions for setting up PKGD
1867PACKAGEBUILDPKGD ?= " \
1868 perform_packagecopy \
1869 ${PACKAGE_PREPROCESS_FUNCS} \
1870 split_and_strip_files \
1871 fixup_perms \
1872 "
1873# Functions which split PKGD up into separate packages
1874PACKAGESPLITFUNCS ?= " \
1875 package_do_split_locales \
1876 populate_packages"
1877# Functions which process metadata based on split packages
1878PACKAGEFUNCS += " \
1879 package_fixsymlinks \
1880 package_name_hook \
1881 package_do_filedeps \
1882 package_do_shlibs \
1883 package_do_pkgconfig \
1884 read_shlibdeps \
1885 package_depchains \
1886 emit_pkgdata"
1887
1888python do_package () {
1889 # Change the following version to cause sstate to invalidate the package
1890    # cache. This is useful if something this class depends on changes in a
1891    # way that alters the output of this class. rpmdeps is a good example,
1892    # as any change to rpmdeps requires this to be rerun.
1893 # PACKAGE_BBCLASS_VERSION = "1"
1894
1895 # Init cachedpath
1896 global cpath
1897 cpath = oe.cachedpath.CachedPath()
1898
1899 ###########################################################################
1900 # Sanity test the setup
1901 ###########################################################################
1902
1903 packages = (d.getVar('PACKAGES', True) or "").split()
1904 if len(packages) < 1:
1905 bb.debug(1, "No packages to build, skipping do_package")
1906 return
1907
1908 workdir = d.getVar('WORKDIR', True)
1909 outdir = d.getVar('DEPLOY_DIR', True)
1910 dest = d.getVar('D', True)
1911 dvar = d.getVar('PKGD', True)
1912 pn = d.getVar('PN', True)
1913
1914 if not workdir or not outdir or not dest or not dvar or not pn:
1915 msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
1916 package_qa_handle_error("var-undefined", msg, d)
1917 return
1918
1919 bb.build.exec_func("package_get_auto_pr", d)
1920
1921 ###########################################################################
1922 # Optimisations
1923 ###########################################################################
1924
1925    # Continually re-expanding complex expressions is inefficient, particularly when
1926 # we write to the datastore and invalidate the expansion cache. This code
1927 # pre-expands some frequently used variables
1928
1929 def expandVar(x, d):
1930 d.setVar(x, d.getVar(x, True))
1931
1932 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
1933 expandVar(x, d)
1934
1935 ###########################################################################
1936 # Setup PKGD (from D)
1937 ###########################################################################
1938
1939 for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split():
1940 bb.build.exec_func(f, d)
1941
1942 ###########################################################################
1943 # Split up PKGD into PKGDEST
1944 ###########################################################################
1945
1946 cpath = oe.cachedpath.CachedPath()
1947
1948 for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split():
1949 bb.build.exec_func(f, d)
1950
1951 ###########################################################################
1952 # Process PKGDEST
1953 ###########################################################################
1954
1955 # Build global list of files in each split package
1956 global pkgfiles
1957 pkgfiles = {}
1958 packages = d.getVar('PACKAGES', True).split()
1959 pkgdest = d.getVar('PKGDEST', True)
1960 for pkg in packages:
1961 pkgfiles[pkg] = []
1962 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
1963 for file in files:
1964 pkgfiles[pkg].append(walkroot + os.sep + file)
1965
1966 for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
1967 bb.build.exec_func(f, d)
1968}
1969
1970do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
1971do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
1972addtask package after do_install
1973
1974PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
1975SSTATETASKS += "do_package"
1976do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
1977do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
1978do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
1979do_package_setscene[dirs] = "${STAGING_DIR}"
1980
1981python do_package_setscene () {
1982 sstate_setscene(d)
1983}
1984addtask do_package_setscene
1985
1986do_packagedata () {
1987 :
1988}
1989
1990addtask packagedata before do_build after do_package
1991
1992SSTATETASKS += "do_packagedata"
1993do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
1994do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
1995do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
1996do_packagedata[stamp-extra-info] = "${MACHINE}"
1997
1998python do_packagedata_setscene () {
1999 sstate_setscene(d)
2000}
2001addtask do_packagedata_setscene
2002
2003#
2004# Helper functions for the package writing classes
2005#
2006
2007def mapping_rename_hook(d):
2008 """
2009 Rewrite variables to account for package renaming in things
2010 like debian.bbclass or manual PKG variable name changes
2011 """
2012 pkg = d.getVar("PKG", True)
2013 runtime_mapping_rename("RDEPENDS", pkg, d)
2014 runtime_mapping_rename("RRECOMMENDS", pkg, d)
2015 runtime_mapping_rename("RSUGGESTS", pkg, d)
2016 runtime_mapping_rename("RPROVIDES", pkg, d)
2017 runtime_mapping_rename("RREPLACES", pkg, d)
2018 runtime_mapping_rename("RCONFLICTS", pkg, d)
2019
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
new file mode 100644
index 0000000000..7bc29df165
--- /dev/null
+++ b/meta/classes/package_deb.bbclass
@@ -0,0 +1,317 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4
5inherit package
6
7IMAGE_PKGTYPE ?= "deb"
8
9DPKG_ARCH ?= "${TARGET_ARCH}"
10
11PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
12
13APTCONF_TARGET = "${WORKDIR}"
14
15APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
16
17#
18# install a bunch of packages using apt
19# the following shell variables need to be set before calling this func:
20# INSTALL_ROOTFS_DEB - install root dir
21# INSTALL_BASEARCH_DEB - install base architecture
22# INSTALL_ARCHS_DEB - list of available archs
23# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
24# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages whose installation is only attempted
25# INSTALL_PACKAGES_LINGUAS_DEB - additional language (linguas) packages
26# INSTALL_TASK_DEB - task name
27
28python do_package_deb () {
29 import re, copy
30 import textwrap
31 import subprocess
32
33 workdir = d.getVar('WORKDIR', True)
34 if not workdir:
35 bb.error("WORKDIR not defined, unable to package")
36 return
37
38 outdir = d.getVar('PKGWRITEDIRDEB', True)
39 if not outdir:
40 bb.error("PKGWRITEDIRDEB not defined, unable to package")
41 return
42
43 packages = d.getVar('PACKAGES', True)
44 if not packages:
45 bb.debug(1, "PACKAGES not defined, nothing to package")
46 return
47
48 tmpdir = d.getVar('TMPDIR', True)
49
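    # We're about to add new packages, so the index needs to be rebuilt;
    # remove the corresponding stamp file (mirrors package_ipk.bbclass).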
50 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
51 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
52
53 if packages == []:
54 bb.debug(1, "No packages; nothing to do")
55 return
56
57 pkgdest = d.getVar('PKGDEST', True)
58
59 def cleanupcontrol(root):
60 for p in ['CONTROL', 'DEBIAN']:
61 p = os.path.join(root, p)
62 if os.path.exists(p):
63 bb.utils.prunedir(p)
64
65 for pkg in packages.split():
66 localdata = bb.data.createCopy(d)
67 root = "%s/%s" % (pkgdest, pkg)
68
69 lf = bb.utils.lockfile(root + ".lock")
70
71 localdata.setVar('ROOT', '')
72 localdata.setVar('ROOT_%s' % pkg, root)
73 pkgname = localdata.getVar('PKG_%s' % pkg, True)
74 if not pkgname:
75 pkgname = pkg
76 localdata.setVar('PKG', pkgname)
77
78 localdata.setVar('OVERRIDES', pkg)
79
80 bb.data.update_data(localdata)
81 basedir = os.path.join(os.path.dirname(root))
82
83 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
84 bb.utils.mkdirhier(pkgoutdir)
85
86 os.chdir(root)
87 cleanupcontrol(root)
88 from glob import glob
89 g = glob('*')
90 if not g and localdata.getVar('ALLOW_EMPTY') != "1":
91 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
92 bb.utils.unlockfile(lf)
93 continue
94
95 controldir = os.path.join(root, 'DEBIAN')
96 bb.utils.mkdirhier(controldir)
97 os.chmod(controldir, 0755)
98 try:
99 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
100 # import codecs
101 # ctrlfile = codecs.open("someFile", "w", "utf-8")
102 except OSError:
103 bb.utils.unlockfile(lf)
104 raise bb.build.FuncFailed("unable to open control file for writing.")
105
106 fields = []
107 pe = d.getVar('PKGE', True)
108 if pe and int(pe) > 0:
109 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
110 else:
111 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
112 fields.append(["Description: %s\n", ['DESCRIPTION']])
113 fields.append(["Section: %s\n", ['SECTION']])
114 fields.append(["Priority: %s\n", ['PRIORITY']])
115 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
116 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
117 fields.append(["OE: %s\n", ['PN']])
118 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
119 if d.getVar('HOMEPAGE', True):
120 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
121
122 # Package, Version, Maintainer, Description - mandatory
123 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
124
125
126 def pullData(l, d):
127 l2 = []
128 for i in l:
129 data = d.getVar(i, True)
130 if data is None:
131                    raise KeyError(i)
132 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
133 data = 'all'
134 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
135                        # The fields in a deb control file don't allow the
136                        # character `_', so map the arch's `_' to `-',
137                        # e.g. `x86_64' -> `x86-64'
138 data = data.replace('_', '-')
139 l2.append(data)
140 return l2
141
142 ctrlfile.write("Package: %s\n" % pkgname)
143 # check for required fields
144 try:
145 for (c, fs) in fields:
146 for f in fs:
147 if localdata.getVar(f) is None:
148 raise KeyError(f)
149 # Special behavior for description...
150 if 'DESCRIPTION' in fs:
151 summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
152 ctrlfile.write('Description: %s\n' % unicode(summary))
153 description = localdata.getVar('DESCRIPTION', True) or "."
154 description = textwrap.dedent(description).strip()
155 if '\\n' in description:
156 # Manually indent
157 for t in description.split('\\n'):
158                            # We don't limit the width when manually indenting, but we do
159                            # need textwrap.fill() to set the initial_indent and
160                            # subsequent_indent, so set a large width
161 ctrlfile.write('%s\n' % unicode(textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' ')))
162 else:
163 # Auto indent
164 ctrlfile.write('%s\n' % unicode(textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' ')))
165
166 else:
167 ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
168 except KeyError:
169 import sys
170 (type, value, traceback) = sys.exc_info()
171 bb.utils.unlockfile(lf)
172 ctrlfile.close()
173 raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
174 # more fields
175
176 custom_fields_chunk = get_package_additional_metadata("deb", localdata)
177 if custom_fields_chunk is not None:
178 ctrlfile.write(unicode(custom_fields_chunk))
179 ctrlfile.write("\n")
180
181 mapping_rename_hook(localdata)
182
183 def debian_cmp_remap(var):
184        # dpkg does not allow '(' or ')' in a dependency name;
185        # replace each occurrence with '__'
186        #
187        # In Debian, '>' and '<' do not mean what they appear to mean:
188        # '<' = less than or equal
189        # '>' = greater than or equal
190        # adjust these to the strict '<<' and '>>' equivalents
191 #
192 for dep in var:
193 if '(' in dep:
194 newdep = dep.replace('(', '__')
195 newdep = newdep.replace(')', '__')
196 if newdep != dep:
197 var[newdep] = var[dep]
198 del var[dep]
199 for dep in var:
200 for i, v in enumerate(var[dep]):
201 if (v or "").startswith("< "):
202 var[dep][i] = var[dep][i].replace("< ", "<< ")
203 elif (v or "").startswith("> "):
204 var[dep][i] = var[dep][i].replace("> ", ">> ")
205
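        # Example of the remap above on explode_dep_versions2 output
        # (hypothetical values):
        #   {"libfoo (x)": ["> 1.0"]} -> {"libfoo __x__": [">> 1.0"]}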
206 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
207 debian_cmp_remap(rdepends)
208 for dep in rdepends:
209 if '*' in dep:
210 del rdepends[dep]
211 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
212 debian_cmp_remap(rrecommends)
213 for dep in rrecommends:
214 if '*' in dep:
215 del rrecommends[dep]
216 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
217 debian_cmp_remap(rsuggests)
218 rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "")
219 debian_cmp_remap(rprovides)
220 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
221 debian_cmp_remap(rreplaces)
222 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
223 debian_cmp_remap(rconflicts)
224 if rdepends:
225 ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
226 if rsuggests:
227 ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
228 if rrecommends:
229 ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
230 if rprovides:
231 ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
232 if rreplaces:
233 ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
234 if rconflicts:
235 ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
236 ctrlfile.close()
237
238 for script in ["preinst", "postinst", "prerm", "postrm"]:
239 scriptvar = localdata.getVar('pkg_%s' % script, True)
240 if not scriptvar:
241 continue
242 try:
243 scriptfile = open(os.path.join(controldir, script), 'w')
244 except OSError:
245 bb.utils.unlockfile(lf)
246 raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
247 scriptfile.write("#!/bin/sh\n")
248 scriptfile.write(scriptvar)
249 scriptfile.close()
250 os.chmod(os.path.join(controldir, script), 0755)
251
252 conffiles_str = localdata.getVar("CONFFILES", True)
253 if conffiles_str:
254 try:
255 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
256 except OSError:
257 bb.utils.unlockfile(lf)
258 raise bb.build.FuncFailed("unable to open conffiles for writing.")
259 for f in conffiles_str.split():
260 if os.path.exists(oe.path.join(root, f)):
261 conffiles.write('%s\n' % f)
262 conffiles.close()
263
264 os.chdir(basedir)
265 ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
266 if ret != 0:
267 bb.utils.unlockfile(lf)
268 raise bb.build.FuncFailed("dpkg-deb execution failed")
269
270 cleanupcontrol(root)
271 bb.utils.unlockfile(lf)
272}
273
274SSTATETASKS += "do_package_write_deb"
275do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
276do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
277
278python do_package_write_deb_setscene () {
279 tmpdir = d.getVar('TMPDIR', True)
280
281 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
282 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
283
284 sstate_setscene(d)
285}
286addtask do_package_write_deb_setscene
287
288python () {
289 if d.getVar('PACKAGES', True) != '':
290 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
291 d.appendVarFlag('do_package_write_deb', 'depends', deps)
292 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
293
294 # Map TARGET_ARCH to Debian's ideas about architectures
295 darch = d.getVar('DPKG_ARCH', True)
296 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
297 d.setVar('DPKG_ARCH', 'i386')
298 elif darch == "x86_64":
299 d.setVar('DPKG_ARCH', 'amd64')
300 elif darch == "arm":
301 d.setVar('DPKG_ARCH', 'armel')
302}
303
304python do_package_write_deb () {
305 bb.build.exec_func("read_subpackage_metadata", d)
306 bb.build.exec_func("do_package_deb", d)
307}
308do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
309do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
310do_package_write_deb[umask] = "022"
311addtask package_write_deb after do_packagedata do_package
312
313
314PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
315PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
316
317do_build[recrdeptask] += "do_package_write_deb"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
new file mode 100644
index 0000000000..2949d1d2e0
--- /dev/null
+++ b/meta/classes/package_ipk.bbclass
@@ -0,0 +1,261 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "ipk"
4
5IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
6IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
7
8PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
9
10# Program to be used to build opkg packages
11OPKGBUILDCMD ??= "opkg-build"
12
13OPKG_ARGS = "--force_postinstall --prefer-arch-to-version"
14OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
15OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
16
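The OPKG_ARGS assignments above use BitBake's inline-Python ${@...} expansion together with the two-element-list-indexed-by-boolean idiom (False selects index 0, True selects index 1). In plain Python, with illustrative variable values, the expressions evaluate like this:

# NO_RECOMMENDATIONS == "1" selects the second list element.
no_recommendations = "1"
arg = ['', '--no-install-recommends'][no_recommendations == "1"]
assert arg == '--no-install-recommends'

# A hypothetical PACKAGE_EXCLUDE value, showing the --add-exclude expansion.
package_exclude = "pkg-a pkg-b"
arg = ['', '--add-exclude ' + ' --add-exclude '.join(package_exclude.split())][package_exclude != ""]
assert arg == '--add-exclude pkg-a --add-exclude pkg-b'
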
17OPKGLIBDIR = "${localstatedir}/lib"
18
19python do_package_ipk () {
20 import re, copy
21 import textwrap
22 import subprocess
23
24 workdir = d.getVar('WORKDIR', True)
25 outdir = d.getVar('PKGWRITEDIRIPK', True)
26 tmpdir = d.getVar('TMPDIR', True)
27 pkgdest = d.getVar('PKGDEST', True)
28 if not workdir or not outdir or not tmpdir:
29 bb.error("Variables incorrectly set, unable to package")
30 return
31
32 packages = d.getVar('PACKAGES', True)
33 if not packages or packages == '':
34 bb.debug(1, "No packages; nothing to do")
35 return
36
37 # We're about to add new packages, so the index needs to be regenerated;
38 # remove the appropriate stamp file.
39 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
40 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
41
42 def cleanupcontrol(root):
43 for p in ['CONTROL', 'DEBIAN']:
44 p = os.path.join(root, p)
45 if os.path.exists(p):
46 bb.utils.prunedir(p)
47
48 for pkg in packages.split():
49 localdata = bb.data.createCopy(d)
50 root = "%s/%s" % (pkgdest, pkg)
51
52 lf = bb.utils.lockfile(root + ".lock")
53
54 localdata.setVar('ROOT', '')
55 localdata.setVar('ROOT_%s' % pkg, root)
56 pkgname = localdata.getVar('PKG_%s' % pkg, True)
57 if not pkgname:
58 pkgname = pkg
59 localdata.setVar('PKG', pkgname)
60
61 localdata.setVar('OVERRIDES', pkg)
62
63 bb.data.update_data(localdata)
64 basedir = os.path.join(os.path.dirname(root))
65 arch = localdata.getVar('PACKAGE_ARCH', True)
66 pkgoutdir = "%s/%s" % (outdir, arch)
67 bb.utils.mkdirhier(pkgoutdir)
68 os.chdir(root)
69 cleanupcontrol(root)
70 from glob import glob
71 g = glob('*')
72 if not g and localdata.getVar('ALLOW_EMPTY', True) != "1":
73 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
74 bb.utils.unlockfile(lf)
75 continue
76
77 controldir = os.path.join(root, 'CONTROL')
78 bb.utils.mkdirhier(controldir)
79 try:
80 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
81 except OSError:
82 bb.utils.unlockfile(lf)
83 raise bb.build.FuncFailed("unable to open control file for writing.")
84
85 fields = []
86 pe = d.getVar('PKGE', True)
87 if pe and int(pe) > 0:
88 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
89 else:
90 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
91 fields.append(["Description: %s\n", ['DESCRIPTION']])
92 fields.append(["Section: %s\n", ['SECTION']])
93 fields.append(["Priority: %s\n", ['PRIORITY']])
94 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
95 fields.append(["License: %s\n", ['LICENSE']])
96 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
97 fields.append(["OE: %s\n", ['PN']])
98 if d.getVar('HOMEPAGE', True):
99 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
100
101 def pullData(l, d):
102 l2 = []
103 for i in l:
104 l2.append(d.getVar(i, True))
105 return l2
106
107 ctrlfile.write("Package: %s\n" % pkgname)
108 # check for required fields
109 try:
110 for (c, fs) in fields:
111 for f in fs:
112 if localdata.getVar(f) is None:
113 raise KeyError(f)
114 # Special behavior for description...
115 if 'DESCRIPTION' in fs:
116 summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
117 ctrlfile.write('Description: %s\n' % summary)
118 description = localdata.getVar('DESCRIPTION', True) or "."
119 description = textwrap.dedent(description).strip()
120 if '\\n' in description:
121 # Manually indent
122 for t in description.split('\\n'):
123 # We don't limit the width when manually indenting, but we do
124 # need textwrap.fill() to set the initial_indent and
125 # subsequent_indent, so set a large width
126 ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' '))
127 else:
128 # Auto indent
129 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
130 else:
131 ctrlfile.write(c % tuple(pullData(fs, localdata)))
132 except KeyError:
133 import sys
134 (type, value, traceback) = sys.exc_info()
135 ctrlfile.close()
136 bb.utils.unlockfile(lf)
137 raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
138 # more fields
139
140 custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
141 if custom_fields_chunk is not None:
142 ctrlfile.write(custom_fields_chunk)
143 ctrlfile.write("\n")
144
145 mapping_rename_hook(localdata)
146
147 def debian_cmp_remap(var):
148 # In Debian, '>' and '<' do not mean what they appear to mean:
149 # '<' = less than or equal
150 # '>' = greater than or equal
151 # Adjust these to the strict '<<' and '>>' equivalents.
152 #
153 for dep in var:
154 for i, v in enumerate(var[dep]):
155 if (v or "").startswith("< "):
156 var[dep][i] = var[dep][i].replace("< ", "<< ")
157 elif (v or "").startswith("> "):
158 var[dep][i] = var[dep][i].replace("> ", ">> ")
159
160 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
161 debian_cmp_remap(rdepends)
162 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
163 debian_cmp_remap(rrecommends)
164 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
165 debian_cmp_remap(rsuggests)
166 rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "")
167 debian_cmp_remap(rprovides)
168 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
169 debian_cmp_remap(rreplaces)
170 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
171 debian_cmp_remap(rconflicts)
172
173 if rdepends:
174 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
175 if rsuggests:
176 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
177 if rrecommends:
178 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
179 if rprovides:
180 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
181 if rreplaces:
182 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
183 if rconflicts:
184 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
185 src_uri = localdata.getVar("SRC_URI", True) or "None"
186 if src_uri:
187 src_uri = re.sub(r"\s+", " ", src_uri)
188 ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
189 ctrlfile.close()
190
191 for script in ["preinst", "postinst", "prerm", "postrm"]:
192 scriptvar = localdata.getVar('pkg_%s' % script, True)
193 if not scriptvar:
194 continue
195 try:
196 scriptfile = open(os.path.join(controldir, script), 'w')
197 except OSError:
198 bb.utils.unlockfile(lf)
199 raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
200 scriptfile.write(scriptvar)
201 scriptfile.close()
202 os.chmod(os.path.join(controldir, script), 0755)
203
204 conffiles_str = localdata.getVar("CONFFILES", True)
205 if conffiles_str:
206 try:
207 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
208 except OSError:
209 bb.utils.unlockfile(lf)
210 raise bb.build.FuncFailed("unable to open conffiles for writing.")
211 for f in conffiles_str.split():
212 if os.path.exists(oe.path.join(root, f)):
213 conffiles.write('%s\n' % f)
214 conffiles.close()
215
216 os.chdir(basedir)
217 ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
218 d.getVar("OPKGBUILDCMD", True), pkg, pkgoutdir), shell=True)
219 if ret != 0:
220 bb.utils.unlockfile(lf)
221 raise bb.build.FuncFailed("opkg-build execution failed")
222
223 cleanupcontrol(root)
224 bb.utils.unlockfile(lf)
225
226}
227
228SSTATETASKS += "do_package_write_ipk"
229do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
230do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
231
232python do_package_write_ipk_setscene () {
233 tmpdir = d.getVar('TMPDIR', True)
234
235 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
236 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
237
238 sstate_setscene(d)
239}
240addtask do_package_write_ipk_setscene
241
242python () {
243 if d.getVar('PACKAGES', True) != '':
244 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
245 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
246 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
247}
248
249python do_package_write_ipk () {
250 bb.build.exec_func("read_subpackage_metadata", d)
251 bb.build.exec_func("do_package_ipk", d)
252}
253do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
254do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
255do_package_write_ipk[umask] = "022"
256addtask package_write_ipk after do_packagedata do_package
257
258PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
259PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
260
261do_build[recrdeptask] += "do_package_write_ipk"
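
For reference, the debian_cmp_remap() helper inside do_package_ipk above can be exercised standalone; the input dict mirrors what bb.utils.explode_dep_versions2 returns:

def debian_cmp_remap(var):
    # opkg/dpkg read '<' as '<=' and '>' as '>='; rewrite to the strict forms.
    for dep in var:
        for i, v in enumerate(var[dep]):
            if (v or "").startswith("< "):
                var[dep][i] = var[dep][i].replace("< ", "<< ")
            elif (v or "").startswith("> "):
                var[dep][i] = var[dep][i].replace("> ", ">> ")

deps = {"libfoo": ["> 1.2"], "libbar": ["< 2.0"], "libbaz": []}
debian_cmp_remap(deps)
assert deps == {"libfoo": [">> 1.2"], "libbar": ["<< 2.0"], "libbaz": []}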
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
new file mode 100644
index 0000000000..1ff2b36434
--- /dev/null
+++ b/meta/classes/package_rpm.bbclass
@@ -0,0 +1,731 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "rpm"
4
5RPM="rpm"
6RPMBUILD="rpmbuild"
7
8PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
9
10# Maintaining the per-file dependencies has significant overhead when writing the
11# packages. When set, this value merges them for efficiency.
12MERGEPERFILEDEPS = "1"
13
14# Construct per file dependencies file
15def write_rpm_perfiledata(srcname, d):
16 workdir = d.getVar('WORKDIR', True)
17 packages = d.getVar('PACKAGES', True)
18 pkgd = d.getVar('PKGD', True)
19
20 def dump_filerdeps(varname, outfile, d):
21 outfile.write("#!/usr/bin/env python\n\n")
22 outfile.write("# Dependency table\n")
23 outfile.write('deps = {\n')
24 for pkg in packages.split():
25 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
26 dependsflist = (d.getVar(dependsflist_key, True) or "")
27 for dfile in dependsflist.split():
28 key = "FILE" + varname + "_" + dfile + "_" + pkg
29 depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
30 file = dfile.replace("@underscore@", "_")
31 file = file.replace("@closebrace@", "]")
32 file = file.replace("@openbrace@", "[")
33 file = file.replace("@tab@", "\t")
34 file = file.replace("@space@", " ")
35 file = file.replace("@at@", "@")
36 outfile.write('"' + pkgd + file + '" : "')
37 for dep in depends_dict:
38 ver = depends_dict[dep]
39 if dep and ver:
40 ver = ver.replace("(","")
41 ver = ver.replace(")","")
42 outfile.write(dep + " " + ver + " ")
43 else:
44 outfile.write(dep + " ")
45 outfile.write('",\n')
46 outfile.write('}\n\n')
47 outfile.write("import sys\n")
48 outfile.write("while 1:\n")
49 outfile.write("\tline = sys.stdin.readline().strip()\n")
50 outfile.write("\tif not line:\n")
51 outfile.write("\t\tsys.exit(0)\n")
52 outfile.write("\tif line in deps:\n")
53 outfile.write("\t\tprint(deps[line] + '\\n')\n")
54
55 # OE-core dependencies a.k.a. RPM requires
56 outdepends = workdir + "/" + srcname + ".requires"
57
58 try:
59 dependsfile = open(outdepends, 'w')
60 except OSError:
61 raise bb.build.FuncFailed("unable to open requires file for writing.")
62
63 dump_filerdeps('RDEPENDS', dependsfile, d)
64
65 dependsfile.close()
66 os.chmod(outdepends, 0755)
67
68 # OE-core / RPM Provides
69 outprovides = workdir + "/" + srcname + ".provides"
70
71 try:
72 providesfile = open(outprovides, 'w')
73 except OSError:
74 raise bb.build.FuncFailed("unable to open provides file for writing.")
75
76 dump_filerdeps('RPROVIDES', providesfile, d)
77
78 providesfile.close()
79 os.chmod(outprovides, 0755)
80
81 return (outdepends, outprovides)
82
83
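The .requires/.provides files that write_rpm_perfiledata() emits are themselves small Python scripts; rpmbuild invokes them via __find_requires/__find_provides, feeding file paths on stdin and reading dependencies from stdout. The generated script has roughly this shape (the dependency table below is illustrative):

# Illustrative shape of a generated <srcname>.requires script.
deps = {
    "/pkgd/usr/bin/foo": "libc.so.6 libm.so.6 ",
}

import sys
while 1:
    line = sys.stdin.readline().strip()
    if not line:
        sys.exit(0)
    if line in deps:
        print(deps[line] + '\n')
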
84python write_specfile () {
85 import oe.packagedata
86
87 # append information for logs and patches to %prep
88 def add_prep(d, spec_files_bottom):
89 if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
90 spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
91 spec_files_bottom.append('echo "Logs and patches are included; please check them in SOURCES"')
92 spec_files_bottom.append('')
93
94 # Append the tarball names as SourceN entries in the spec file.
95 def tail_source(d):
96 if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
97 ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
98 if not os.path.exists(ar_outdir):
99 return
100 source_list = os.listdir(ar_outdir)
101 source_number = 0
102 for source in source_list:
103 # rpmbuild doesn't need root permission, but it does need to
104 # know each file's user and group name; when working under
105 # fakeroot the only available user and group is "root".
106 f = os.path.join(ar_outdir, source)
107 os.chown(f, 0, 0)
108 spec_preamble_top.append('Source%s: %s' % (source_number, source))
109 source_number += 1
110 # We need a simple way to remove the MLPREFIX from package names
111 # and dependency information...
112 def strip_multilib(name, d):
113 multilibs = d.getVar('MULTILIBS', True) or ""
114 for ext in multilibs.split():
115 eext = ext.split(':')
116 if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
117 name = "".join(name.split(eext[1] + '-'))
118 return name
119
120 def strip_multilib_deps(deps, d):
121 depends = bb.utils.explode_dep_versions2(deps or "")
122 newdeps = {}
123 for dep in depends:
124 newdeps[strip_multilib(dep, d)] = depends[dep]
125 return bb.utils.join_deps(newdeps)
126
127# ml = d.getVar("MLPREFIX", True)
128# if ml and name and len(ml) != 0 and name.find(ml) == 0:
129# return ml.join(name.split(ml, 1)[1:])
130# return name
131
132 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
133 # This format is similar to OE's; however, there are restrictions on the
134 # characters that can appear in each field. In the Version field, "-"
135 # characters are not allowed, while "-" is allowed in the Release field.
136 #
137 # We translate the "-" in the version to a "+", by loading the PKGV
138 # from the dependent recipe, replacing the - with a +, and then using
139 # that value to do a replace inside of this recipe's dependencies.
140 # This preserves the "-" separator between the version and release, as
141 # well as any "-" characters inside of the release field.
142 #
143 # All of this has to happen BEFORE the mapping_rename_hook as
144 # after renaming we cannot look up the dependencies in the packagedata
145 # store.
146 def translate_vers(varname, d):
147 depends = d.getVar(varname, True)
148 if depends:
149 depends_dict = bb.utils.explode_dep_versions2(depends)
150 newdeps_dict = {}
151 for dep in depends_dict:
152 verlist = []
153 for ver in depends_dict[dep]:
154 if '-' in ver:
155 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
156 if 'PKGV' in subd:
157 pv = subd['PV']
158 pkgv = subd['PKGV']
159 reppv = pkgv.replace('-', '+')
160 ver = ver.replace(pv, reppv).replace(pkgv, reppv)
161 if 'PKGR' in subd:
162 # Make sure PKGR rather than PR in ver
163 pr = '-' + subd['PR']
164 pkgr = '-' + subd['PKGR']
165 if pkgr not in ver:
166 ver = ver.replace(pr, pkgr)
167 verlist.append(ver)
168 else:
169 verlist.append(ver)
170 newdeps_dict[dep] = verlist
171 depends = bb.utils.join_deps(newdeps_dict)
172 d.setVar(varname, depends.strip())
173
174 # We need to change the dependency style from BB to RPM.
175 # This needs to happen AFTER the mapping_rename_hook.
176 def print_deps(variable, tag, array, d):
177 depends = variable
178 if depends:
179 depends_dict = bb.utils.explode_dep_versions2(depends)
180 for dep in depends_dict:
181 for ver in depends_dict[dep]:
182 ver = ver.replace('(', '')
183 ver = ver.replace(')', '')
184 array.append("%s: %s %s" % (tag, dep, ver))
185 if not len(depends_dict[dep]):
186 array.append("%s: %s" % (tag, dep))
187
188 def walk_files(walkpath, target, conffiles):
189 # We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
190 # when packaging. We just ignore these entries, which are created in
191 # packages-split/ and not package/.
192 # We also have the odd situation where a CONTROL/DEBIAN directory can be removed
193 # in the middle of the walk; the isdir() test would then fail and the walk code
194 # would assume it's a file, hence we check for the names in files too.
195 for rootpath, dirs, files in os.walk(walkpath):
196 path = rootpath.replace(walkpath, "")
197 if path.endswith("DEBIAN") or path.endswith("CONTROL"):
198 continue
199 for dir in dirs:
200 if dir == "CONTROL" or dir == "DEBIAN":
201 continue
202 # All packages own the directories their files are in...
203 target.append('%dir "' + path + '/' + dir + '"')
204 for file in files:
205 if file == "CONTROL" or file == "DEBIAN":
206 continue
207 if conffiles.count(path + '/' + file):
208 target.append('%config "' + path + '/' + file + '"')
209 else:
210 target.append('"' + path + '/' + file + '"')
211
212 # Prevent the prerm/postrm scripts from being run during an upgrade
213 def wrap_uninstall(scriptvar):
214 scr = scriptvar.strip()
215 if scr.startswith("#!"):
216 pos = scr.find("\n") + 1
217 else:
218 pos = 0
219 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
220 return scr
221
222 def get_perfile(varname, pkg, d):
223 deps = []
224 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
225 dependsflist = (d.getVar(dependsflist_key, True) or "")
226 for dfile in dependsflist.split():
227 key = "FILE" + varname + "_" + dfile + "_" + pkg
228 depends = d.getVar(key, True)
229 if depends:
230 deps.append(depends)
231 return " ".join(deps)
232
233 def append_description(spec_preamble, text):
234 """
235 Add the description to the spec file.
236 """
237 import textwrap
238 dedent_text = textwrap.dedent(text).strip()
239 # Bitbake saves "\n" as "\\n"
240 if '\\n' in dedent_text:
241 for t in dedent_text.split('\\n'):
242 spec_preamble.append(t.strip())
243 else:
244 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
245
246 packages = d.getVar('PACKAGES', True)
247 if not packages or packages == '':
248 bb.debug(1, "No packages; nothing to do")
249 return
250
251 pkgdest = d.getVar('PKGDEST', True)
252 if not pkgdest:
253 bb.fatal("No PKGDEST")
254
255 outspecfile = d.getVar('OUTSPECFILE', True)
256 if not outspecfile:
257 bb.fatal("No OUTSPECFILE")
258
259 # Construct the SPEC file...
260 srcname = strip_multilib(d.getVar('PN', True), d)
261 srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
262 srcversion = d.getVar('PKGV', True).replace('-', '+')
263 srcrelease = d.getVar('PKGR', True)
264 srcepoch = (d.getVar('PKGE', True) or "")
265 srclicense = d.getVar('LICENSE', True)
266 srcsection = d.getVar('SECTION', True)
267 srcmaintainer = d.getVar('MAINTAINER', True)
268 srchomepage = d.getVar('HOMEPAGE', True)
269 srcdescription = d.getVar('DESCRIPTION', True) or "."
270 srccustomtagschunk = get_package_additional_metadata("rpm", d)
271
272 srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d)
273 srcrdepends = []
274 srcrrecommends = []
275 srcrsuggests = []
276 srcrprovides = []
277 srcrreplaces = []
278 srcrconflicts = []
279 srcrobsoletes = []
280
281 srcrpreinst = []
282 srcrpostinst = []
283 srcrprerm = []
284 srcrpostrm = []
285
286 spec_preamble_top = []
287 spec_preamble_bottom = []
288
289 spec_scriptlets_top = []
290 spec_scriptlets_bottom = []
291
292 spec_files_top = []
293 spec_files_bottom = []
294
295 perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
296
297 for pkg in packages.split():
298 localdata = bb.data.createCopy(d)
299
300 root = "%s/%s" % (pkgdest, pkg)
301
302 localdata.setVar('ROOT', '')
303 localdata.setVar('ROOT_%s' % pkg, root)
304 pkgname = localdata.getVar('PKG_%s' % pkg, True)
305 if not pkgname:
306 pkgname = pkg
307 localdata.setVar('PKG', pkgname)
308
309 localdata.setVar('OVERRIDES', pkg)
310
311 bb.data.update_data(localdata)
312
313 conffiles = (localdata.getVar('CONFFILES', True) or "").split()
314
315 splitname = strip_multilib(pkgname, d)
316
317 splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
318 splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
319 splitrelease = (localdata.getVar('PKGR', True) or "")
320 splitepoch = (localdata.getVar('PKGE', True) or "")
321 splitlicense = (localdata.getVar('LICENSE', True) or "")
322 splitsection = (localdata.getVar('SECTION', True) or "")
323 splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
324 splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
325
326 translate_vers('RDEPENDS', localdata)
327 translate_vers('RRECOMMENDS', localdata)
328 translate_vers('RSUGGESTS', localdata)
329 translate_vers('RPROVIDES', localdata)
330 translate_vers('RREPLACES', localdata)
331 translate_vers('RCONFLICTS', localdata)
332
333 # Map the dependencies into their final form
334 mapping_rename_hook(localdata)
335
336 splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d)
337 splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d)
338 splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d)
339 splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d)
340 splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d)
341 splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d)
342 splitrobsoletes = []
343
344 splitrpreinst = localdata.getVar('pkg_preinst', True)
345 splitrpostinst = localdata.getVar('pkg_postinst', True)
346 splitrprerm = localdata.getVar('pkg_prerm', True)
347 splitrpostrm = localdata.getVar('pkg_postrm', True)
348
349
350 if not perfiledeps:
351 # Add in summary of per file dependencies
352 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
353 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
354
355 # Gather special src/first package data
356 if srcname == splitname:
357 srcrdepends = splitrdepends
358 srcrrecommends = splitrrecommends
359 srcrsuggests = splitrsuggests
360 srcrprovides = splitrprovides
361 srcrreplaces = splitrreplaces
362 srcrconflicts = splitrconflicts
363
364 srcrpreinst = splitrpreinst
365 srcrpostinst = splitrpostinst
366 srcrprerm = splitrprerm
367 srcrpostrm = splitrpostrm
368
369 file_list = []
370 walk_files(root, file_list, conffiles)
371 if not file_list and localdata.getVar('ALLOW_EMPTY', True) != "1":
372 bb.note("Not creating empty RPM package for %s" % splitname)
373 else:
374 bb.note("Creating RPM package for %s" % splitname)
375 spec_files_top.append('%files')
376 spec_files_top.append('%defattr(-,-,-,-)')
377 if file_list:
378 bb.note("Creating RPM package for %s" % splitname)
379 spec_files_top.extend(file_list)
380 else:
381 bb.note("Creating EMPTY RPM Package for %s" % splitname)
382 spec_files_top.append('')
383 continue
384
385 # Process subpackage data
386 spec_preamble_bottom.append('%%package -n %s' % splitname)
387 spec_preamble_bottom.append('Summary: %s' % splitsummary)
388 if srcversion != splitversion:
389 spec_preamble_bottom.append('Version: %s' % splitversion)
390 if srcrelease != splitrelease:
391 spec_preamble_bottom.append('Release: %s' % splitrelease)
392 if srcepoch != splitepoch:
393 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
394 if srclicense != splitlicense:
395 spec_preamble_bottom.append('License: %s' % splitlicense)
396 spec_preamble_bottom.append('Group: %s' % splitsection)
397
398 if srccustomtagschunk != splitcustomtagschunk:
399 spec_preamble_bottom.append(splitcustomtagschunk)
400
401 # Replaces == Obsoletes && Provides
402 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
403 rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
404 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
405 for dep in rreplaces:
406 if not dep in robsoletes:
407 robsoletes[dep] = rreplaces[dep]
408 if not dep in rprovides:
409 rprovides[dep] = rreplaces[dep]
410 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
411 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
412
413 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
414 if splitrpreinst:
415 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
416 if splitrpostinst:
417 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
418 if splitrprerm:
419 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
420 if splitrpostrm:
421 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
422
423 # Suggests in RPM are like Recommends in OE-core.
424 print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
425 # There is no RPM analog for OE's suggests, so call them Recommends for now.
426 print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
427 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
428 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
429
430 # Conflicts cannot also be listed in Provides, so we need to filter them out.
431 if splitrconflicts:
432 depends_dict = bb.utils.explode_dep_versions2(splitrconflicts)
433 newdeps_dict = {}
434 for dep in depends_dict:
435 if dep not in splitrprovides:
436 newdeps_dict[dep] = depends_dict[dep]
437 if newdeps_dict:
438 splitrconflicts = bb.utils.join_deps(newdeps_dict)
439 else:
440 splitrconflicts = ""
441
442 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
443
444 spec_preamble_bottom.append('')
445
446 spec_preamble_bottom.append('%%description -n %s' % splitname)
447 append_description(spec_preamble_bottom, splitdescription)
448
449 spec_preamble_bottom.append('')
450
451 # Now process scriptlets
452 if splitrpreinst:
453 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
454 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
455 spec_scriptlets_bottom.append(splitrpreinst)
456 spec_scriptlets_bottom.append('')
457 if splitrpostinst:
458 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
459 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
460 spec_scriptlets_bottom.append(splitrpostinst)
461 spec_scriptlets_bottom.append('')
462 if splitrprerm:
463 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
464 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
465 scriptvar = wrap_uninstall(splitrprerm)
466 spec_scriptlets_bottom.append(scriptvar)
467 spec_scriptlets_bottom.append('')
468 if splitrpostrm:
469 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
470 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
471 scriptvar = wrap_uninstall(splitrpostrm)
472 spec_scriptlets_bottom.append(scriptvar)
473 spec_scriptlets_bottom.append('')
474
475 # Now process files
476 file_list = []
477 walk_files(root, file_list, conffiles)
478 if not file_list and localdata.getVar('ALLOW_EMPTY', True) != "1":
479 bb.note("Not creating empty RPM package for %s" % splitname)
480 else:
481 spec_files_bottom.append('%%files -n %s' % splitname)
482 spec_files_bottom.append('%defattr(-,-,-,-)')
483 if file_list:
484 bb.note("Creating RPM package for %s" % splitname)
485 spec_files_bottom.extend(file_list)
486 else:
487 bb.note("Creating EMPTY RPM Package for %s" % splitname)
488 spec_files_bottom.append('')
489
490 del localdata
491
492 add_prep(d, spec_files_bottom)
493 spec_preamble_top.append('Summary: %s' % srcsummary)
494 spec_preamble_top.append('Name: %s' % srcname)
495 spec_preamble_top.append('Version: %s' % srcversion)
496 spec_preamble_top.append('Release: %s' % srcrelease)
497 if srcepoch and srcepoch.strip() != "":
498 spec_preamble_top.append('Epoch: %s' % srcepoch)
499 spec_preamble_top.append('License: %s' % srclicense)
500 spec_preamble_top.append('Group: %s' % srcsection)
501 spec_preamble_top.append('Packager: %s' % srcmaintainer)
502 if srchomepage:
503 spec_preamble_top.append('URL: %s' % srchomepage)
504 if srccustomtagschunk:
505 spec_preamble_top.append(srccustomtagschunk)
506 tail_source(d)
507
508 # Replaces == Obsoletes && Provides
509 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
510 rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
511 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
512 for dep in rreplaces:
513 if not dep in robsoletes:
514 robsoletes[dep] = rreplaces[dep]
515 if not dep in rprovides:
516 rprovides[dep] = rreplaces[dep]
517 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
518 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
519
520 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
521 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
522 if srcrpreinst:
523 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
524 if srcrpostinst:
525 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
526 if srcrprerm:
527 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
528 if srcrpostrm:
529 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
530
531 # Suggests in RPM are like Recommends in OE-core.
532 print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
533 # There is no RPM analog for OE's suggests, so call them Recommends for now.
534 print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
535 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
536 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
537
538 # Conflicts cannot also be listed in Provides, so we need to filter them out.
539 if srcrconflicts:
540 depends_dict = bb.utils.explode_dep_versions2(srcrconflicts)
541 newdeps_dict = {}
542 for dep in depends_dict:
543 if dep not in srcrprovides:
544 newdeps_dict[dep] = depends_dict[dep]
545 if newdeps_dict:
546 srcrconflicts = bb.utils.join_deps(newdeps_dict)
547 else:
548 srcrconflicts = ""
549
550 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
551
552 spec_preamble_top.append('')
553
554 spec_preamble_top.append('%description')
555 append_description(spec_preamble_top, srcdescription)
556
557 spec_preamble_top.append('')
558
559 if srcrpreinst:
560 spec_scriptlets_top.append('%pre')
561 spec_scriptlets_top.append('# %s - preinst' % srcname)
562 spec_scriptlets_top.append(srcrpreinst)
563 spec_scriptlets_top.append('')
564 if srcrpostinst:
565 spec_scriptlets_top.append('%post')
566 spec_scriptlets_top.append('# %s - postinst' % srcname)
567 spec_scriptlets_top.append(srcrpostinst)
568 spec_scriptlets_top.append('')
569 if srcrprerm:
570 spec_scriptlets_top.append('%preun')
571 spec_scriptlets_top.append('# %s - prerm' % srcname)
572 scriptvar = wrap_uninstall(srcrprerm)
573 spec_scriptlets_top.append(scriptvar)
574 spec_scriptlets_top.append('')
575 if srcrpostrm:
576 spec_scriptlets_top.append('%postun')
577 spec_scriptlets_top.append('# %s - postrm' % srcname)
578 scriptvar = wrap_uninstall(srcrpostrm)
579 spec_scriptlets_top.append(scriptvar)
580 spec_scriptlets_top.append('')
581
582 # Write the SPEC file
583 try:
584 specfile = open(outspecfile, 'w')
585 except OSError:
586 raise bb.build.FuncFailed("unable to open spec file for writing.")
587
588 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
589 # of the generated spec file
590 external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
591 if external_preamble:
592 specfile.write(external_preamble + "\n")
593
594 for line in spec_preamble_top:
595 specfile.write(line + "\n")
596
597 for line in spec_preamble_bottom:
598 specfile.write(line + "\n")
599
600 for line in spec_scriptlets_top:
601 specfile.write(line + "\n")
602
603 for line in spec_scriptlets_bottom:
604 specfile.write(line + "\n")
605
606 for line in spec_files_top:
607 specfile.write(line + "\n")
608
609 for line in spec_files_bottom:
610 specfile.write(line + "\n")
611
612 specfile.close()
613}
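
Per the long comment in write_specfile() above, translate_vers() rewrites '-' inside PKGV to '+' so that RPM's Version field stays legal while the '-' separating version from release survives. A worked example with illustrative PKGV/PKGR values:

pkgv, pkgr = "1.2-rc1", "r3"
dep_version = "%s-%s" % (pkgv, pkgr)   # as recorded in the OE metadata
reppv = pkgv.replace('-', '+')         # "1.2+rc1"
rpm_version = dep_version.replace(pkgv, reppv)
assert rpm_version == "1.2+rc1-r3"     # legal RPM Version-Release pair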
614
615python do_package_rpm () {
616 # We need a simple way to remove the MLPREFIX from package names
617 # and dependency information...
618 def strip_multilib(name, d):
619 ml = d.getVar("MLPREFIX", True)
620 if ml and name and len(ml) != 0 and name.find(ml) >= 0:
621 return "".join(name.split(ml))
622 return name
623
624 workdir = d.getVar('WORKDIR', True)
625 tmpdir = d.getVar('TMPDIR', True)
626 pkgd = d.getVar('PKGD', True)
627 pkgdest = d.getVar('PKGDEST', True)
628 if not workdir or not pkgd or not tmpdir:
629 bb.error("Variables incorrectly set, unable to package")
630 return
631
632 packages = d.getVar('PACKAGES', True)
633 if not packages or packages == '':
634 bb.debug(1, "No packages; nothing to do")
635 return
636
637 # Construct the spec file...
638 # If the spec file already exists and has not been stored in
639 # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
640 # so remove it before running rpmbuild.
641 srcname = strip_multilib(d.getVar('PN', True), d)
642 outspecfile = workdir + "/" + srcname + ".spec"
643 if os.path.isfile(outspecfile):
644 os.remove(outspecfile)
645 d.setVar('OUTSPECFILE', outspecfile)
646 bb.build.exec_func('write_specfile', d)
647
648 perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
649 if perfiledeps:
650 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
651
652 # Setup the rpmbuild arguments...
653 rpmbuild = d.getVar('RPMBUILD', True)
654 targetsys = d.getVar('TARGET_SYS', True)
655 targetvendor = d.getVar('TARGET_VENDOR', True)
656 package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
657 if package_arch not in "all any noarch".split() and not package_arch.endswith("_nativesdk"):
658 ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
659 d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
660 else:
661 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
662 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
663 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${TARGET_VENDOR}-${TARGET_OS}')
664 magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
665 bb.utils.mkdirhier(pkgwritedir)
666 os.chmod(pkgwritedir, 0755)
667
668 cmd = rpmbuild
669 cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
670 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
671 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
672 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
673 if perfiledeps:
674 cmd = cmd + " --define '__find_requires " + outdepends + "'"
675 cmd = cmd + " --define '__find_provides " + outprovides + "'"
676 else:
677 cmd = cmd + " --define '__find_requires %{nil}'"
678 cmd = cmd + " --define '__find_provides %{nil}'"
679 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
680 cmd = cmd + " --define 'debug_package %{nil}'"
681 cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
682 cmd = cmd + " --define '_tmppath " + workdir + "'"
683 if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
684 cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
685 cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
686 cmdsrpm = cmdsrpm + " -bs " + outspecfile
687 # Build the .src.rpm
688 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
689 d.setVarFlag('SBUILDSPEC', 'func', '1')
690 bb.build.exec_func('SBUILDSPEC', d)
691 cmd = cmd + " -bb " + outspecfile
692
693 # Build the rpm package!
694 d.setVar('BUILDSPEC', cmd + "\n")
695 d.setVarFlag('BUILDSPEC', 'func', '1')
696 bb.build.exec_func('BUILDSPEC', d)
697}
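
The rpmbuild command above is assembled by string concatenation into a single shell line. A sketch of the same invocation built as an argument list, which makes the --define pairs easier to audit (the paths and target triple are illustrative placeholders, and only a subset of the defines is shown):

workdir, pkgwritedir, pkgd = "/work", "/work/deploy-rpms/armv7a", "/work/package"
cmd = ["rpmbuild", "--nodeps", "--short-circuit",
       "--target", "armv7a-vendor-linux", "--buildroot", pkgd,
       "--define", "_topdir %s" % workdir,
       "--define", "_rpmdir %s" % pkgwritedir,
       "--define", "_use_internal_dependency_generator 0",
       "-bb", "/work/foo.spec"]
print(" ".join(cmd))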
698
699python () {
700 if d.getVar('PACKAGES', True) != '':
701 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
702 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
703 d.setVarFlag('do_package_write_rpm', 'fakeroot', 1)
704}
705
706SSTATETASKS += "do_package_write_rpm"
707do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
708do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
709# Take a shared lock: we can write multiple packages at the same time,
710# but we need to stop the rootfs/solver from running while we do.
711do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
712
713python do_package_write_rpm_setscene () {
714 sstate_setscene(d)
715}
716addtask do_package_write_rpm_setscene
717
718python do_package_write_rpm () {
719 bb.build.exec_func("read_subpackage_metadata", d)
720 bb.build.exec_func("do_package_rpm", d)
721}
722
723do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
724do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
725do_package_write_rpm[umask] = "022"
726addtask package_write_rpm after do_packagedata do_package
727
728PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
729PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
730
731do_build[recrdeptask] += "do_package_write_rpm"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
new file mode 100644
index 0000000000..fed2c28b69
--- /dev/null
+++ b/meta/classes/package_tar.bbclass
@@ -0,0 +1,69 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "tar"
4
5python do_package_tar () {
6 import subprocess
7 workdir = d.getVar('WORKDIR', True)
8 if not workdir:
9 bb.error("WORKDIR not defined, unable to package")
10 return
11
12 outdir = d.getVar('DEPLOY_DIR_TAR', True)
13 if not outdir:
14 bb.error("DEPLOY_DIR_TAR not defined, unable to package")
15 return
16
17 dvar = d.getVar('D', True)
18 if not dvar:
19 bb.error("D not defined, unable to package")
20 return
21
22 packages = d.getVar('PACKAGES', True)
23 if not packages:
24 bb.debug(1, "PACKAGES not defined, nothing to package")
25 return
26
27 pkgdest = d.getVar('PKGDEST', True)
28
29 bb.utils.mkdirhier(outdir)
30 bb.utils.mkdirhier(dvar)
31
32 for pkg in packages.split():
33 localdata = bb.data.createCopy(d)
34 root = "%s/%s" % (pkgdest, pkg)
35
36 overrides = localdata.getVar('OVERRIDES')
37 localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
38 bb.data.update_data(localdata)
39
40 bb.utils.mkdirhier(root)
41 basedir = os.path.dirname(root)
42 tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
43 os.chdir(root)
44 dlist = os.listdir(root)
45 if not dlist:
46 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
47 continue
48 args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
49 ret = subprocess.call(args + [tarfn] + dlist)
50 if ret != 0:
51 bb.error("Creation of tar %s failed." % tarfn)
52}
53
54python () {
55 if d.getVar('PACKAGES', True) != '':
56 deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
57 deps.append('tar-native:do_populate_sysroot')
58 deps.append('virtual/fakeroot-native:do_populate_sysroot')
59 d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
60 d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
61}
62
63
64python do_package_write_tar () {
65 bb.build.exec_func("read_subpackage_metadata", d)
66 bb.build.exec_func("do_package_tar", d)
67}
68do_package_write_tar[dirs] = "${D}"
69addtask package_write_tar before do_build after do_packagedata do_package
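
As a point of comparison, the tar invocation above (create a gzipped archive while skipping the CONTROL and DEBIAN control directories) can be expressed with Python's tarfile module. This is a hedged sketch, and the basename test only approximates tar's --exclude matching:

import os
import tarfile

def write_pkg_tar(root, tarfn):
    def keep(tarinfo):
        # Excluding a directory here also skips everything beneath it.
        if os.path.basename(tarinfo.name) in ("CONTROL", "DEBIAN"):
            return None
        return tarinfo
    with tarfile.open(tarfn, "w:gz") as tar:
        for entry in os.listdir(root):
            tar.add(os.path.join(root, entry), arcname=entry, filter=keep)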
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
new file mode 100644
index 0000000000..d1aedf2289
--- /dev/null
+++ b/meta/classes/packagedata.bbclass
@@ -0,0 +1,26 @@
1python read_subpackage_metadata () {
2 import oe.packagedata
3
4 vars = {
5 "PN" : d.getVar('PN', True),
6 "PE" : d.getVar('PE', True),
7 "PV" : d.getVar('PV', True),
8 "PR" : d.getVar('PR', True),
9 }
10
11 data = oe.packagedata.read_pkgdata(vars["PN"], d)
12
13 for key in data.keys():
14 d.setVar(key, data[key])
15
16 for pkg in d.getVar('PACKAGES', True).split():
17 sdata = oe.packagedata.read_subpkgdata(pkg, d)
18 for key in sdata.keys():
19 if key in vars:
20 if sdata[key] != vars[key]:
21 if key == "PN":
22 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
23 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
24 continue
25 d.setVar(key, sdata[key])
26}
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
new file mode 100644
index 0000000000..9bc9cc22ad
--- /dev/null
+++ b/meta/classes/packagegroup.bbclass
@@ -0,0 +1,47 @@
1# Class for packagegroup (package group) recipes
2
3# By default, only the packagegroup package itself is in PACKAGES.
4# -dbg and -dev flavours are handled by the anonfunc below.
5# This means that packagegroup recipes used to build multiple packagegroup
6# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
7PACKAGES = "${PN}"
8
9# By default, packagegroup packages do not depend on a certain architecture.
10# Only if dependencies are modified by MACHINE_FEATURES do packages
11# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass.
12inherit allarch
13
14# This automatically adds -dbg and -dev flavours of all PACKAGES
15# to the list. Their dependencies (RRECOMMENDS) are handled as usual
16# by package_depchains in a following step.
17# Also mark all packages as ALLOW_EMPTY
18python () {
19 packages = d.getVar('PACKAGES', True).split()
20 genpackages = []
21 for pkg in packages:
22 d.setVar("ALLOW_EMPTY_%s" % pkg, "1")
23 for postfix in ['-dbg', '-dev', '-ptest']:
24 genpackages.append(pkg+postfix)
25 if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
26 d.setVar('PACKAGES', ' '.join(packages+genpackages))
27}
28
29# We don't want to look at shared library dependencies for the
30# dbg packages
31DEPCHAIN_DBGDEFAULTDEPS = "1"
32
33# We only need the packaging tasks - disable the rest
34do_fetch[noexec] = "1"
35do_unpack[noexec] = "1"
36do_patch[noexec] = "1"
37do_configure[noexec] = "1"
38do_compile[noexec] = "1"
39do_install[noexec] = "1"
40do_populate_sysroot[noexec] = "1"
41
42python () {
43 initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True)
44 if initman and initman in ['sysvinit', 'systemd'] and not base_contains('DISTRO_FEATURES', initman, True, False, d):
45 bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
46}
47
diff --git a/meta/classes/packageinfo.bbclass b/meta/classes/packageinfo.bbclass
new file mode 100644
index 0000000000..7d60ace1dc
--- /dev/null
+++ b/meta/classes/packageinfo.bbclass
@@ -0,0 +1,22 @@
1python packageinfo_handler () {
2 import oe.packagedata
3 pkginfolist = []
4
5 pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + '/runtime/'
6 if os.path.exists(pkgdata_dir):
7 for root, dirs, files in os.walk(pkgdata_dir):
8 for pkgname in files:
9 if pkgname.endswith('.packaged'):
10 pkgname = pkgname[:-9]
11 pkgdatafile = root + pkgname
12 try:
13 sdata = oe.packagedata.read_pkgdatafile(pkgdatafile)
14 sdata['PKG'] = pkgname
15 pkginfolist.append(sdata)
16 except Exception as e:
17 bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, e.__class__, str(e)))
18 bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data)
19}
20
21addhandler packageinfo_handler
22packageinfo_handler[eventmask] = "bb.event.RequestPackageInfo"
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
new file mode 100644
index 0000000000..86c65b3b8d
--- /dev/null
+++ b/meta/classes/patch.bbclass
@@ -0,0 +1,187 @@
1# Copyright (C) 2006 OpenedHand LTD
2
3# Point to an empty file so any user's custom settings don't break things
4QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
5
6PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
7
8inherit terminal
9
10def src_patches(d, all = False ):
11 workdir = d.getVar('WORKDIR', True)
12 fetch = bb.fetch2.Fetch([], d)
13 patches = []
14 sources = []
15 for url in fetch.urls:
16 local = patch_path(url, fetch, workdir)
17 if not local:
18 if all:
19 local = fetch.localpath(url)
20 sources.append(local)
21 continue
22
23 urldata = fetch.ud[url]
24 parm = urldata.parm
25 patchname = parm.get('pname') or os.path.basename(local)
26
27 apply, reason = should_apply(parm, d)
28 if not apply:
29 if reason:
30 bb.note("Patch %s %s" % (patchname, reason))
31 continue
32
33 patchparm = {'patchname': patchname}
34 if "striplevel" in parm:
35 striplevel = parm["striplevel"]
36 elif "pnum" in parm:
37 #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
38 striplevel = parm["pnum"]
39 else:
40 striplevel = '1'
41 patchparm['striplevel'] = striplevel
42
43 patchdir = parm.get('patchdir')
44 if patchdir:
45 patchparm['patchdir'] = patchdir
46
47 localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
48 patches.append(localurl)
49
50 if all:
51 return sources
52
53 return patches
54
55def patch_path(url, fetch, workdir):
56 """Return the local path of a patch, or None if this isn't a patch"""
57
58 local = fetch.localpath(url)
59 base, ext = os.path.splitext(os.path.basename(local))
60 if ext in ('.gz', '.bz2', '.Z'):
61 local = os.path.join(workdir, base)
62 ext = os.path.splitext(base)[1]
63
64 urldata = fetch.ud[url]
65 if "apply" in urldata.parm:
66 apply = oe.types.boolean(urldata.parm["apply"])
67 if not apply:
68 return
69 elif ext not in (".diff", ".patch"):
70 return
71
72 return local
73
74def should_apply(parm, d):
75 """Determine if we should apply the given patch"""
76
77 if "mindate" in parm or "maxdate" in parm:
78 pn = d.getVar('PN', True)
79 srcdate = d.getVar('SRCDATE_%s' % pn, True)
80 if not srcdate:
81 srcdate = d.getVar('SRCDATE', True)
82
83 if srcdate == "now":
84 srcdate = d.getVar('DATE', True)
85
86 if "maxdate" in parm and parm["maxdate"] < srcdate:
87 return False, 'is outdated'
88
89 if "mindate" in parm and parm["mindate"] > srcdate:
90 return False, 'is predated'
91
92
93 if "minrev" in parm:
94 srcrev = d.getVar('SRCREV', True)
95 if srcrev and srcrev < parm["minrev"]:
96 return False, 'applies to later revisions'
97
98 if "maxrev" in parm:
99 srcrev = d.getVar('SRCREV', True)
100 if srcrev and srcrev > parm["maxrev"]:
101 return False, 'applies to earlier revisions'
102
103 if "rev" in parm:
104 srcrev = d.getVar('SRCREV', True)
105 if srcrev and parm["rev"] not in srcrev:
106 return False, "doesn't apply to revision"
107
108 if "notrev" in parm:
109 srcrev = d.getVar('SRCREV', True)
110 if srcrev and parm["notrev"] in srcrev:
111 return False, "doesn't apply to revision"
112
113 return True, None
114
115should_apply[vardepsexclude] = "DATE SRCDATE"
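
A small example of how the mindate/maxdate URL parameters drive should_apply(): the check is a plain string comparison on YYYYMMDD dates, so a patch whose maxdate is older than SRCDATE is skipped as outdated (the values here are illustrative):

parm = {"maxdate": "20140101"}
srcdate = "20140610"
applies = not ("maxdate" in parm and parm["maxdate"] < srcdate)
assert applies is False   # maxdate < srcdate, so the patch is outdated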
116
117python patch_do_patch() {
118 import oe.patch
119
120 patchsetmap = {
121 "patch": oe.patch.PatchTree,
122 "quilt": oe.patch.QuiltTree,
123 "git": oe.patch.GitApplyTree,
124 }
125
126 cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
127
128 resolvermap = {
129 "noop": oe.patch.NOOPResolver,
130 "user": oe.patch.UserResolver,
131 }
132
133 rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
134
135 classes = {}
136
137 s = d.getVar('S', True)
138
139 path = os.getenv('PATH')
140 os.putenv('PATH', d.getVar('PATH', True))
141
142 # We must use one TMPDIR per process so that the "patch" processes
143 # don't generate the same temp file name.
144
145 import tempfile
146 process_tmpdir = tempfile.mkdtemp()
147 os.environ['TMPDIR'] = process_tmpdir
148
149 for patch in src_patches(d):
150 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
151
152 if "patchdir" in parm:
153 patchdir = parm["patchdir"]
154 if not os.path.isabs(patchdir):
155 patchdir = os.path.join(s, patchdir)
156 else:
157 patchdir = s
158
159 if patchdir not in classes:
160 patchset = cls(patchdir, d)
161 resolver = rcls(patchset, oe_terminal)
162 classes[patchdir] = (patchset, resolver)
163 patchset.Clean()
164 else:
165 patchset, resolver = classes[patchdir]
166
167 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
168 try:
169 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
170 except Exception as exc:
171 bb.utils.remove(process_tmpdir, True)
172 bb.fatal(str(exc))
173 try:
174 resolver.Resolve()
175 except bb.BBHandledException as e:
176 bb.utils.remove(process_tmpdir, True)
177 bb.fatal(str(e))
178
179 bb.utils.remove(process_tmpdir, True)
180}
181patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
182
183addtask patch after do_unpack
184do_patch[dirs] = "${WORKDIR}"
185do_patch[depends] = "${PATCHDEPENDENCY}"
186
187EXPORT_FUNCTIONS do_patch
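
The extension handling in patch_path() above peels one compression suffix off the file name before testing whether it is a .diff/.patch. Isolated as a standalone predicate:

import os

def looks_like_patch(filename):
    base, ext = os.path.splitext(os.path.basename(filename))
    if ext in ('.gz', '.bz2', '.Z'):
        ext = os.path.splitext(base)[1]
    return ext in ('.diff', '.patch')

assert looks_like_patch("fix-build.patch.gz")
assert not looks_like_patch("sources.tar.gz")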
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
new file mode 100644
index 0000000000..cc8de8b381
--- /dev/null
+++ b/meta/classes/perlnative.bbclass
@@ -0,0 +1,3 @@
1EXTRANATIVEPATH += "perl-native"
2DEPENDS += "perl-native"
3OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
new file mode 100644
index 0000000000..922174dffe
--- /dev/null
+++ b/meta/classes/pixbufcache.bbclass
@@ -0,0 +1,70 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for pixbuf
3# packages.
4#
5
6DEPENDS += "qemu-native"
7inherit qemu
8
9PIXBUF_PACKAGES ??= "${PN}"
10
11pixbufcache_common() {
12if [ "x$D" != "x" ]; then
13 $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
14 bindir=${bindir} base_libdir=${base_libdir}
15else
16
17 # Update the pixbuf loaders in case they haven't been registered yet
18 GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
19
20 if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
21 for icondir in ${datadir}/icons/*; do
22 if [ -d ${icondir} ]; then
23 gtk-update-icon-cache -t -q ${icondir}
24 fi
25 done
26 fi
27fi
28}
29
30python populate_packages_append() {
31 pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split()
32
33 for pkg in pixbuf_pkgs:
34 bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
35 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
36 if not postinst:
37 postinst = '#!/bin/sh\n'
38 postinst += d.getVar('pixbufcache_common', True)
39 d.setVar('pkg_postinst_%s' % pkg, postinst)
40
41 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
42 if not postrm:
43 postrm = '#!/bin/sh\n'
44 postrm += d.getVar('pixbufcache_common', True)
45 d.setVar('pkg_postrm_%s' % pkg, postrm)
46}
47
48#
49# Add a sstate postinst hook to update the cache for native packages
50#
51SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
52
53pixbufcache_sstate_postinst() {
54 if [ "${BB_CURRENTTASK}" = "populate_sysroot" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
55 then
56 GDK_PIXBUF_FATAL_LOADER=1 gdk-pixbuf-query-loaders --update-cache
57 fi
58}
59
60# Add all of the dependencies of gdk-pixbuf as dependencies of
61# do_populate_sysroot_setscene so that pixbufcache_sstate_postinst can work
62# (otherwise gdk-pixbuf-query-loaders may not exist or link). Only add
63# gdk-pixbuf-native if we're not building gdk-pixbuf itself.
64#
65# Packages that use this class should extend this variable with their runtime
66# dependencies.
67PIXBUFCACHE_SYSROOT_DEPS = ""
68PIXBUFCACHE_SYSROOT_DEPS_class-native = "${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene"
69do_populate_sysroot_setscene[depends] += "${PIXBUFCACHE_SYSROOT_DEPS}"
70do_populate_sysroot[depends] += "${@d.getVar('PIXBUFCACHE_SYSROOT_DEPS', True).replace('_setscene','')}"
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
new file mode 100644
index 0000000000..ad1f84f506
--- /dev/null
+++ b/meta/classes/pkgconfig.bbclass
@@ -0,0 +1,2 @@
1DEPENDS_prepend = "pkgconfig-native "
2
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
new file mode 100644
index 0000000000..f64a911b72
--- /dev/null
+++ b/meta/classes/populate_sdk.bbclass
@@ -0,0 +1,7 @@
1# The majority of populate_sdk is located in populate_sdk_base.
2# This chunk simply facilitates compatibility with SDK-only recipes.
3
4inherit populate_sdk_base
5
6addtask populate_sdk after do_install before do_build
7
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
new file mode 100644
index 0000000000..10d04edc63
--- /dev/null
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -0,0 +1,337 @@
1inherit meta toolchain-scripts
2
3# Wildcards specifying complementary packages to install for every package that has been explicitly
4# installed into the rootfs
5COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
6COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
7COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
8COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
9COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
10
11def complementary_globs(featurevar, d):
12 all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
13 globs = []
14 features = set((d.getVar(featurevar, True) or '').split())
15 for name, glob in all_globs.items():
16 if name in features:
17 globs.append(glob)
18 return ' '.join(globs)
19
20SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
21SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
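
A worked example of complementary_globs(): with the default SDKIMAGE_FEATURES of "dev-pkgs dbg-pkgs", only the matching COMPLEMENTARY_GLOB flags are selected (the dict below stands in for d.getVarFlags('COMPLEMENTARY_GLOB')):

all_globs = {'dev-pkgs': '*-dev', 'staticdev-pkgs': '*-staticdev',
             'doc-pkgs': '*-doc', 'dbg-pkgs': '*-dbg', 'ptest-pkgs': '*-ptest'}
features = set("dev-pkgs dbg-pkgs".split())
globs = ' '.join(glob for name, glob in all_globs.items() if name in features)
assert set(globs.split()) == {'*-dev', '*-dbg'}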
22
23inherit populate_sdk_${IMAGE_PKGTYPE}
24
25SDK_DIR = "${WORKDIR}/sdk"
26SDK_OUTPUT = "${SDK_DIR}/image"
27SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
28
29B_task-populate-sdk = "${SDK_DIR}"
30
31SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
32
33TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
34TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
35TOOLCHAIN_TARGET_TASK ?= "packagegroup-core-standalone-sdk-target packagegroup-core-standalone-sdk-target-dbg"
36TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
37TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
38
39SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
40SDK_DEPENDS = "virtual/fakeroot-native sed-native"
41
42# We want REAL_MULTIMACH_TARGET_SYS to point to TUNE_PKGARCH, not PACKAGE_ARCH,
43# as the latter could be set to MACHINE_ARCH.
44REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
45
46PID = "${@os.getpid()}"
47
48EXCLUDE_FROM_WORLD = "1"
49
50SDK_PACKAGING_FUNC ?= "create_shar"
51
52fakeroot python do_populate_sdk() {
53 from oe.sdk import populate_sdk
54 from oe.manifest import create_manifest, Manifest
55
56 pn = d.getVar('PN', True)
57 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
58
59 # create target/host SDK manifests
60 create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
61 manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
62 create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
63 manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
64
65 populate_sdk(d)
66
67 # Handle multilibs in the SDK environment, siteconfig, etc files...
68 localdata = bb.data.createCopy(d)
69
70 # make sure we only use the WORKDIR value from 'd', or it can change
71 localdata.setVar('WORKDIR', d.getVar('WORKDIR', True))
72
73 # make sure we only use the SDKTARGETSYSROOT value from 'd'
74 localdata.setVar('SDKTARGETSYSROOT', d.getVar('SDKTARGETSYSROOT', True))
75
76 # Process DEFAULTTUNE
77 bb.build.exec_func("create_sdk_files", localdata)
78
79 variants = d.getVar("MULTILIB_VARIANTS", True) or ""
80 for item in variants.split():
81 # Load overrides from 'd' to avoid having to reset the value...
82 overrides = d.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
83 localdata.setVar("OVERRIDES", overrides)
84 bb.data.update_data(localdata)
85 bb.build.exec_func("create_sdk_files", localdata)
86
87 bb.build.exec_func("tar_sdk", d)
88
89 bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d)
90}
91
92fakeroot create_sdk_files() {
93 # Setup site file for external use
94 toolchain_create_sdk_siteconfig ${SDK_OUTPUT}/${SDKPATH}/site-config-${REAL_MULTIMACH_TARGET_SYS}
95
96 toolchain_create_sdk_env_script ${SDK_OUTPUT}/${SDKPATH}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
97
98 # Add version information
99 toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${REAL_MULTIMACH_TARGET_SYS}
100
101 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
102
103 # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
104 # Escape special characters like '+' and '.' in the SDKPATH
105 escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
106 sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
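	# Illustrative example: an SDKPATH of /opt/my+sdk/1.0 would be written
	# into relocate_sdk.py as /opt/my\+sdk/1\.0, i.e. regex-escaped.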
107}
108
109SDKTAROPTS = "--owner=root --group=root -j"
110
111fakeroot tar_sdk() {
112 # Package it up
113 mkdir -p ${SDK_DEPLOY}
114 cd ${SDK_OUTPUT}/${SDKPATH}
115 tar ${SDKTAROPTS} -c --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 .
116}
117
118fakeroot create_shar() {
119 cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
120#!/bin/bash
121
122INST_ARCH=$(uname -m | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
123SDK_ARCH=$(echo ${SDK_ARCH} | sed -e "s/i[3-6]86/ix86/" -e "s/x86[-_]64/x86_64/")
124
125if [ "$INST_ARCH" != "$SDK_ARCH" ]; then
126 # Allow for installation of ix86 SDK on x86_64 host
127 if [ "$INST_ARCH" != x86_64 -o "$SDK_ARCH" != ix86 ]; then
128 echo "Error: Installation machine not supported!"
129 exit 1
130 fi
131fi
132
133DEFAULT_INSTALL_DIR="${SDKPATH}"
134SUDO_EXEC=""
135target_sdk_dir=""
136answer=""
137relocate=1
138savescripts=0
139verbose=0
140while getopts ":yd:DRS" OPT; do
141 case $OPT in
142 y)
143 answer="Y"
144 [ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
145 ;;
146 d)
147 target_sdk_dir=$OPTARG
148 ;;
149 D)
150 verbose=1
151 ;;
152 R)
153 relocate=0
154 savescripts=1
155 ;;
156 S)
157 savescripts=1
158 ;;
159 *)
160 echo "Usage: $(basename $0) [-y] [-d <dir>]"
161 echo " -y Automatic yes to all prompts"
162 echo " -d <dir> Install the SDK to <dir>"
163 echo "======== Advanced DEBUGGING ONLY OPTIONS ========"
164 echo " -S Save relocation scripts"
165 echo " -R Do not relocate executables"
166		echo " -D Use 'set -x' to see what is going on"
167 exit 1
168 ;;
169 esac
170done
171
172if [ $verbose = 1 ] ; then
173 set -x
174fi
175
176printf "Enter target directory for SDK (default: $DEFAULT_INSTALL_DIR): "
177if [ "$target_sdk_dir" = "" ]; then
178 read target_sdk_dir
179 [ "$target_sdk_dir" = "" ] && target_sdk_dir=$DEFAULT_INSTALL_DIR
180else
181 echo "$target_sdk_dir"
182fi
183
184eval target_sdk_dir=$(echo "$target_sdk_dir"|sed 's/ /\\ /g')
185if [ -d "$target_sdk_dir" ]; then
186 target_sdk_dir=$(cd "$target_sdk_dir"; pwd)
187else
188 target_sdk_dir=$(readlink -m "$target_sdk_dir")
189fi
190
191if [ -n "$(echo $target_sdk_dir|grep ' ')" ]; then
192 echo "The target directory path ($target_sdk_dir) contains spaces. Abort!"
193 exit 1
194fi
195
196if [ -e "$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}" ]; then
197	echo "The directory \"$target_sdk_dir\" already contains an SDK for this architecture."
198 printf "If you continue, existing files will be overwritten! Proceed[y/N]?"
199
200 default_answer="n"
201else
202 printf "You are about to install the SDK to \"$target_sdk_dir\". Proceed[Y/n]?"
203
204 default_answer="y"
205fi
206
207if [ "$answer" = "" ]; then
208 read answer
209 [ "$answer" = "" ] && answer="$default_answer"
210else
211 echo $answer
212fi
213
214if [ "$answer" != "Y" -a "$answer" != "y" ]; then
215 echo "Installation aborted!"
216 exit 1
217fi
218
219# Try to create the directory (this will not succeed if the user doesn't have rights)
220mkdir -p $target_sdk_dir >/dev/null 2>&1
221
222# If we don't have the right to access the dir, gain it via sudo
223if [ ! -x $target_sdk_dir -o ! -w $target_sdk_dir -o ! -r $target_sdk_dir ]; then
224 SUDO_EXEC=$(which "sudo")
225 if [ -z $SUDO_EXEC ]; then
226 echo "No command 'sudo' found, please install sudo first. Abort!"
227 exit 1
228 fi
229
230	# test that sudo can gain root rights
231 $SUDO_EXEC pwd >/dev/null 2>&1
232 [ $? -ne 0 ] && echo "Sorry, you are not allowed to execute as root." && exit 1
233
234 # now that we have sudo rights, create the directory
235 $SUDO_EXEC mkdir -p $target_sdk_dir >/dev/null 2>&1
236fi
237
238payload_offset=$(($(grep -na -m1 "^MARKER:$" $0|cut -d':' -f1) + 1))
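# Note: this installer is a shell archive; everything up to the "MARKER:" line
# is the script itself and the raw tarball payload starts on the following
# line, which is where the tail below begins extracting.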
239
240printf "Extracting SDK..."
241tail -n +$payload_offset $0| $SUDO_EXEC tar xj -C $target_sdk_dir
242echo "done"
243
244printf "Setting it up..."
245# fix environment paths
246for env_setup_script in `ls $target_sdk_dir/environment-setup-*`; do
247 $SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g" -i $env_setup_script
248done
249
250# fix dynamic loader paths in all ELF SDK binaries
251native_sysroot=$($SUDO_EXEC cat $env_setup_script |grep 'OECORE_NATIVE_SYSROOT='|cut -d'=' -f2|tr -d '"')
252dl_path=$($SUDO_EXEC find $native_sysroot/lib -name "ld-linux*")
253if [ "$dl_path" = "" ] ; then
254 echo "SDK could not be set up. Relocate script unable to find ld-linux.so. Abort!"
255 exit 1
256fi
257executable_files=$($SUDO_EXEC find $native_sysroot -type f -perm /111 -exec file '{}' \;| grep "\(executable\|dynamically linked\)" | cut -f 1 -d ':')
258
259tdir=`mktemp -d`
260if [ x$tdir = x ] ; then
261 echo "SDK relocate failed, could not create a temporary directory"
262 exit 1
263fi
264echo "#!/bin/bash" > $tdir/relocate_sdk.sh
265echo exec ${env_setup_script%/*}/relocate_sdk.py $target_sdk_dir $dl_path $executable_files >> $tdir/relocate_sdk.sh
266$SUDO_EXEC mv $tdir/relocate_sdk.sh ${env_setup_script%/*}/relocate_sdk.sh
267$SUDO_EXEC chmod 755 ${env_setup_script%/*}/relocate_sdk.sh
268rm -rf $tdir
269if [ $relocate = 1 ] ; then
270 $SUDO_EXEC ${env_setup_script%/*}/relocate_sdk.sh
271 if [ $? -ne 0 ]; then
272 echo "SDK could not be set up. Relocate script failed. Abort!"
273 exit 1
274 fi
275fi
276
277# replace ${SDKPATH} with the new prefix in all text files: configs/scripts/etc
278$SUDO_EXEC find $native_sysroot -type f -exec file '{}' \;|grep ":.*\(ASCII\|script\|source\).*text"|cut -d':' -f1|$SUDO_EXEC xargs sed -i -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:g"
279
280# change all symlinks pointing to ${SDKPATH}
281for l in $($SUDO_EXEC find $native_sysroot -type l); do
282 $SUDO_EXEC ln -sfn $(readlink $l|$SUDO_EXEC sed -e "s:$DEFAULT_INSTALL_DIR:$target_sdk_dir:") $l
283done
284
285# Find all perl scripts in $native_sysroot and modify them to use the
286# SDK perl instead of the host perl.
287for perl_script in $($SUDO_EXEC grep "^#!.*perl" -rl $native_sysroot); do
288 $SUDO_EXEC sed -i -e "s:^#! */usr/bin/perl.*:#! /usr/bin/env perl:g" -e \
289 "s: /usr/bin/perl: /usr/bin/env perl:g" $perl_script
290done
291
292echo done
293
294# Delete the relocation scripts so that the user is forced to re-run the
295# installer if they want the SDK in another location
296if [ $savescripts = 0 ] ; then
297 $SUDO_EXEC rm ${env_setup_script%/*}/relocate_sdk.py ${env_setup_script%/*}/relocate_sdk.sh
298fi
299
300echo "SDK has been successfully set up and is ready to be used."
301
302exit 0
303
304MARKER:
305EOF
306 # add execution permission
307 chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
308
309 # append the SDK tarball
310 cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
311
312 # delete the old tarball, we don't need it anymore
313 rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2
314}
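# The resulting ${TOOLCHAIN_OUTPUTNAME}.sh therefore consists of the installer
# script above, a "MARKER:" line, and the raw .tar.bz2 payload appended verbatim.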
315
316populate_sdk_log_check() {
317 for target in $*
318 do
319 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
320
321 echo "log_check: Using $lf_path as logfile"
322
323 if test -e "$lf_path"
324 then
325 ${IMAGE_PKGTYPE}_log_check $target $lf_path
326 else
327 echo "Cannot find logfile [$lf_path]"
328 fi
329 echo "Logfile is clean"
330 done
331}
332
333do_populate_sdk[dirs] = "${TOPDIR}"
334do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])}"
335do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
336do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
337addtask populate_sdk
diff --git a/meta/classes/populate_sdk_deb.bbclass b/meta/classes/populate_sdk_deb.bbclass
new file mode 100644
index 0000000000..acb1f73983
--- /dev/null
+++ b/meta/classes/populate_sdk_deb.bbclass
@@ -0,0 +1,13 @@
1do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
2
3DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', True), "i386"]\
4 [d.getVar('SDK_ARCH', True) in \
5 ["x86", "i486", "i586", "i686", "pentium"]]}"
6
7DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', True), "amd64"]\
8 [d.getVar('SDK_ARCH', True) == "x86_64"]}"
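# The ${@[x, y][condition]} construct above is Python list indexing: the boolean
# condition selects index 0 or 1. Note that the second plain assignment replaces
# the first, so effectively only x86_64 is remapped (to "amd64") and any other
# SDK_ARCH value passes through unchanged.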
9
10do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
11
12# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
13DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
diff --git a/meta/classes/populate_sdk_ipk.bbclass b/meta/classes/populate_sdk_ipk.bbclass
new file mode 100644
index 0000000000..8b2cb6dc48
--- /dev/null
+++ b/meta/classes/populate_sdk_ipk.bbclass
@@ -0,0 +1,3 @@
1do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
2
3do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
diff --git a/meta/classes/populate_sdk_rpm.bbclass b/meta/classes/populate_sdk_rpm.bbclass
new file mode 100644
index 0000000000..4954d00490
--- /dev/null
+++ b/meta/classes/populate_sdk_rpm.bbclass
@@ -0,0 +1,16 @@
1# Smart is python based, so be sure python-native is available to us.
2EXTRANATIVEPATH += "python-native"
3
4do_populate_sdk[depends] += "rpm-native:do_populate_sysroot"
5do_populate_sdk[depends] += "rpmresolve-native:do_populate_sysroot"
6do_populate_sdk[depends] += "python-smartpm-native:do_populate_sysroot"
7
8# Needed for update-alternatives
9do_populate_sdk[depends] += "opkg-native:do_populate_sysroot"
10
11# Creating the repo info in do_populate_sdk
12do_populate_sdk[depends] += "createrepo-native:do_populate_sysroot"
13
14rpmlibdir = "/var/lib/rpm"
15
16do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
new file mode 100644
index 0000000000..5a1cb33c6a
--- /dev/null
+++ b/meta/classes/prexport.bbclass
@@ -0,0 +1,58 @@
1PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
2PRSERV_DUMPOPT_PKGARCH = ""
3PRSERV_DUMPOPT_CHECKSUM = ""
4PRSERV_DUMPOPT_COL = "0"
5
6PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
7PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
8
9python prexport_handler () {
10 import bb.event
11 if not e.data:
12 return
13
14 if isinstance(e, bb.event.RecipeParsed):
15 import oe.prservice
16 #get all PR values for the current PRAUTOINX
17 ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
18 ver = ver.replace('%','-')
19 retval = oe.prservice.prserv_dump_db(e.data)
20 if not retval:
21 bb.fatal("prexport_handler: export failed!")
22 (metainfo, datainfo) = retval
23 if not datainfo:
24 bb.warn("prexport_handler: No AUTOPR values found for %s" % ver)
25 return
26 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
27 if 'AUTOINC' in ver:
28 import re
29 srcpv = bb.fetch2.get_srcrev(e.data)
30 base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)]
31 e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver)
32 retval = oe.prservice.prserv_dump_db(e.data)
33 if not retval:
34 bb.fatal("prexport_handler: export failed!")
35 (metainfo, datainfo) = retval
36 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
37 elif isinstance(e, bb.event.ParseStarted):
38 import bb.utils
39 import oe.prservice
40 oe.prservice.prserv_check_avail(e.data)
41 #remove dumpfile
42 bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
43 elif isinstance(e, bb.event.ParseCompleted):
44 import oe.prservice
45 #dump meta info of tables
46 d = e.data.createCopy()
47 d.setVar('PRSERV_DUMPOPT_COL', "1")
48 retval = oe.prservice.prserv_dump_db(d)
49 if not retval:
50 bb.error("prexport_handler: export failed!")
51 return
52 (metainfo, datainfo) = retval
53 oe.prservice.prserv_export_tofile(d, metainfo, None, True)
54
55}
56
57addhandler prexport_handler
58prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted"
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
new file mode 100644
index 0000000000..8ed45f03f0
--- /dev/null
+++ b/meta/classes/primport.bbclass
@@ -0,0 +1,21 @@
1python primport_handler () {
2 import bb.event
3 if not e.data:
4 return
5
6 if isinstance(e, bb.event.ParseCompleted):
7 import oe.prservice
8 #import all exported AUTOPR values
9 imported = oe.prservice.prserv_import_db(e.data)
10 if imported is None:
11 bb.fatal("import failed!")
12
13 for (version, pkgarch, checksum, value) in imported:
14 bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
15 elif isinstance(e, bb.event.ParseStarted):
16 import oe.prservice
17 oe.prservice.prserv_check_avail(e.data)
18}
19
20addhandler primport_handler
21primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted"
diff --git a/meta/classes/prserv.bbclass b/meta/classes/prserv.bbclass
new file mode 100644
index 0000000000..b440d863ef
--- /dev/null
+++ b/meta/classes/prserv.bbclass
@@ -0,0 +1,33 @@
1def prserv_get_pr_auto(d):
2 import oe.prservice
3 import re
4
5 pv = d.getVar("PV", True)
6 if not d.getVar('PRSERV_HOST', True):
7 if 'AUTOINC' in pv:
8 d.setVar("PKGV", pv.replace("AUTOINC", "0"))
9 bb.warn("Not using network based PR service")
10 return None
11
12 version = d.getVar("PRAUTOINX", True)
13 pkgarch = d.getVar("PACKAGE_ARCH", True)
14 checksum = d.getVar("BB_TASKHASH", True)
15
16 conn = d.getVar("__PRSERV_CONN", True)
17 if conn is None:
18 conn = oe.prservice.prserv_make_conn(d)
19 if conn is None:
20 return None
21
22 if "AUTOINC" in pv:
23 srcpv = bb.fetch2.get_srcrev(d)
24 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
25 value = conn.getPR(base_ver, pkgarch, srcpv)
26 d.setVar("PKGV", pv.replace("AUTOINC", str(value)))
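        # Illustrative (assumed values): with PV = "1.0+gitAUTOINC+1234abcd" and
        # the PR server returning 5 for the base version, PKGV becomes
        # "1.0+git5+1234abcd".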
27
28 if d.getVar('PRSERV_LOCKDOWN', True):
29 auto_rev = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
30 else:
31 auto_rev = conn.getPR(version, pkgarch, checksum)
32
33 return auto_rev
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
new file mode 100644
index 0000000000..ea4a38b954
--- /dev/null
+++ b/meta/classes/ptest.bbclass
@@ -0,0 +1,62 @@
1SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
2DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
3This package contains a test directory ${PTEST_PATH} for package test purposes."
4
5PTEST_PATH ?= "${libdir}/${PN}/ptest"
6FILES_${PN}-ptest = "${PTEST_PATH}"
7SECTION_${PN}-ptest = "devel"
8ALLOW_EMPTY_${PN}-ptest = "1"
9PTEST_ENABLED = "${@base_contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
10PTEST_ENABLED_class-native = ""
11PTEST_ENABLED_class-nativesdk = ""
12PTEST_ENABLED_class-cross-canadian = ""
13RDEPENDS_${PN}-ptest_class-native = ""
14RDEPENDS_${PN}-ptest_class-nativesdk = ""
15
16PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
17
18do_configure_ptest() {
19 :
20}
21
22do_configure_ptest_base() {
23 do_configure_ptest
24}
25
26do_compile_ptest() {
27 :
28}
29
30do_compile_ptest_base() {
31 do_compile_ptest
32}
33
34do_install_ptest() {
35 :
36}
37
38do_install_ptest_base() {
39 if [ -f ${WORKDIR}/run-ptest ]; then
40 install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
41 if grep -q install-ptest: Makefile; then
42 oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
43 fi
44 do_install_ptest
45 fi
46}
47
48do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
49
50addtask configure_ptest_base after do_configure before do_compile
51addtask compile_ptest_base after do_compile before do_install
52addtask install_ptest_base after do_install before do_package do_populate_sysroot
53
54python () {
55 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
56 d.setVarFlag('do_install_ptest_base', 'fakeroot', 1)
57
58 # Remove all '*ptest_base' tasks when ptest is not enabled
59    if d.getVar('PTEST_ENABLED', True) != "1":
60 for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
61 bb.build.deltask(i, d)
62}
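# Minimal usage sketch (illustrative, not part of this class): a recipe
# inherits ptest, ships a run-ptest script and installs its test assets:
#
#   inherit ptest
#   SRC_URI += "file://run-ptest"
#   do_install_ptest() {
#       install -m 0755 ${B}/tests/mytest ${D}${PTEST_PATH}/
#   }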
diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass
new file mode 100644
index 0000000000..ebfa4b30f6
--- /dev/null
+++ b/meta/classes/python-dir.bbclass
@@ -0,0 +1,5 @@
1PYTHON_BASEVERSION ?= "2.7"
2PYTHON_ABI ?= ""
3PYTHON_DIR = "python${PYTHON_BASEVERSION}"
4PYTHON_PN = "python${@'' if '${PYTHON_BASEVERSION}'.startswith('2') else '3'}"
5PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
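# Illustrative evaluation: with the default PYTHON_BASEVERSION of "2.7",
# PYTHON_PN is "python" and PYTHON_SITEPACKAGES_DIR expands to
# ${libdir}/python2.7/site-packages; with "3.3", PYTHON_PN becomes "python3".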
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
new file mode 100644
index 0000000000..f86374fd33
--- /dev/null
+++ b/meta/classes/python3native.bbclass
@@ -0,0 +1,7 @@
1PYTHON_BASEVERSION = "3.3"
2
3inherit python-dir
4
5PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
6EXTRANATIVEPATH += "${PYTHON_PN}-native"
7DEPENDS += " ${PYTHON_PN}-native "
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
new file mode 100644
index 0000000000..fdd22bbc86
--- /dev/null
+++ b/meta/classes/pythonnative.bbclass
@@ -0,0 +1,6 @@
1
2inherit python-dir
3
4PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
5EXTRANATIVEPATH += "${PYTHON_PN}-native"
6DEPENDS += " ${PYTHON_PN}-native "
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
new file mode 100644
index 0000000000..3d437b0e45
--- /dev/null
+++ b/meta/classes/qemu.bbclass
@@ -0,0 +1,35 @@
1#
2# This class contains functions for recipes that need QEMU or test for its
3# existence.
4#
5
6def qemu_target_binary(data):
7 target_arch = data.getVar("TARGET_ARCH", True)
8 if target_arch in ("i486", "i586", "i686"):
9 target_arch = "i386"
10 elif target_arch == "powerpc":
11 target_arch = "ppc"
12 elif target_arch == "powerpc64":
13 target_arch = "ppc64"
14
15 return "qemu-" + target_arch
16#
17# The next function returns a string containing the command that is needed
18# to run a certain binary through qemu. For example, if a postinstall
19# scriptlet must run at do_rootfs time and running the postinstall is
20# architecture dependent, we can run it through qemu. In the postinstall
21# scriptlet, we could use the following:
22#
23# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
24#
25def qemu_run_binary(data, rootfs_path, binary):
26 qemu_binary = qemu_target_binary(data)
27 if qemu_binary == "qemu-allarch":
28 qemu_binary = "qemuwrapper"
29
30 libdir = rootfs_path + data.getVar("libdir", False)
31 base_libdir = rootfs_path + data.getVar("base_libdir", False)
32
33 return "PSEUDO_UNLOAD=1 " + qemu_binary + " -L " + rootfs_path\
34 + " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\
35 + rootfs_path + binary
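# Illustrative result (assumed values, after BitBake expansion): for TARGET_ARCH
# "arm", rootfs_path "$D" and binary "/usr/bin/test_app" this returns roughly:
#   PSEUDO_UNLOAD=1 qemu-arm -L $D -E LD_LIBRARY_PATH=$D/usr/lib:$D/lib $D/usr/bin/test_app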
diff --git a/meta/classes/qmake2.bbclass b/meta/classes/qmake2.bbclass
new file mode 100644
index 0000000000..6e73ad2d1e
--- /dev/null
+++ b/meta/classes/qmake2.bbclass
@@ -0,0 +1,27 @@
1#
2# QMake variables for Qt4
3#
4inherit qmake_base
5
6DEPENDS_prepend = "qt4-tools-native "
7
8export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
9export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/qt4/mkspecs/qconfig.pri"
10export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
11export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34"
12export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4"
13export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4"
14export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4"
15export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4"
16export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2"
17export OE_QMAKE_LINK = "${CXX}"
18export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
19export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
20export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
21export OE_QMAKE_LIBS_QT = "qt"
22export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
23export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
24export OE_QMAKE_LCONVERT = "${STAGING_BINDIR_NATIVE}/lconvert4"
25export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
26export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"
27export OE_QMAKE_XMLPATTERNS = "${STAGING_BINDIR_NATIVE}/xmlpatterns4"
diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass
new file mode 100644
index 0000000000..86bbede260
--- /dev/null
+++ b/meta/classes/qmake_base.bbclass
@@ -0,0 +1,119 @@
1QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake"
2
3OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
4QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
5
6# We override this completely to eliminate the -e normally passed in
7EXTRA_OEMAKE = ""
8
9export OE_QMAKE_CC="${CC}"
10export OE_QMAKE_CFLAGS="${CFLAGS}"
11export OE_QMAKE_CXX="${CXX}"
12export OE_QMAKE_LDFLAGS="${LDFLAGS}"
13export OE_QMAKE_AR="${AR}"
14export OE_QMAKE_STRIP="echo"
15export OE_QMAKE_RPATH="-Wl,-rpath-link,"
16
17# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11
18
19oe_qmake_mkspecs () {
20 mkdir -p mkspecs/${OE_QMAKE_PLATFORM}
21 for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do
22 if [ -L $f ]; then
23 lnk=`readlink $f`
24 if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then
25 ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f`
26 else
27 cp $f mkspecs/${OE_QMAKE_PLATFORM}/
28 fi
29 else
30 cp $f mkspecs/${OE_QMAKE_PLATFORM}/
31 fi
32 done
33}
34
35do_generate_qt_config_file() {
36 export QT_CONF_PATH=${WORKDIR}/qt.conf
37 cat > ${WORKDIR}/qt.conf <<EOF
38[Paths]
39Prefix =
40Binaries = ${STAGING_BINDIR_NATIVE}
41Headers = ${STAGING_INCDIR}/qt4
42Plugins = ${STAGING_LIBDIR}/qt4/plugins/
43Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
44EOF
45}
46
47addtask generate_qt_config_file after do_patch before do_configure
48
49qmake_base_do_configure() {
50 case ${QMAKESPEC} in
51 *linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++|*linux-uclibceabi-oe-g++|*linux-gnuspe-oe-g++|*linux-uclibcspe-oe-g++|*linux-gnun32-oe-g++)
52 ;;
53 *-oe-g++)
54 die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
55 ;;
56 *)
57 bbnote Searching for qmake spec file
58 paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
59 paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
60
61 if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
62 paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-x86-g++ $paths"
63 fi
64 for i in $paths; do
65 if test -e $i; then
66 export QMAKESPEC=$i
67 break
68 fi
69 done
70 ;;
71 esac
72
73 bbnote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
74
75 if [ -z "${QMAKE_PROFILES}" ]; then
76 PROFILES="`ls *.pro`"
77 else
78 PROFILES="${QMAKE_PROFILES}"
79 fi
80
81 if [ -z "$PROFILES" ]; then
82 die "QMAKE_PROFILES not set and no profiles found in $PWD"
83 fi
84
85 if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
86 AFTER="-after"
87 QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
88 bbnote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
89 fi
90
91 if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
92 QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
93 bbnote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
94 fi
95
96 # Hack .pro files to use OE utilities
97 LCONVERT_NAME=$(basename ${OE_QMAKE_LCONVERT})
98 LRELEASE_NAME=$(basename ${OE_QMAKE_LRELEASE})
99 LUPDATE_NAME=$(basename ${OE_QMAKE_LUPDATE})
100 XMLPATTERNS_NAME=$(basename ${OE_QMAKE_XMLPATTERNS})
101 find -name '*.pro' \
102 -exec sed -i -e "s|\(=\s*.*\)/$LCONVERT_NAME|\1/lconvert|g" \
103 -e "s|\(=\s*.*\)/$LRELEASE_NAME|\1/lrelease|g" \
104 -e "s|\(=\s*.*\)/$LUPDATE_NAME|\1/lupdate|g" \
105 -e "s|\(=\s*.*\)/$XMLPATTERNS_NAME|\1/xmlpatterns|g" \
106 -e "s|\(=\s*.*\)/lconvert|\1/$LCONVERT_NAME|g" \
107 -e "s|\(=\s*.*\)/lrelease|\1/$LRELEASE_NAME|g" \
108 -e "s|\(=\s*.*\)/lupdate|\1/$LUPDATE_NAME|g" \
109 -e "s|\(=\s*.*\)/xmlpatterns|\1/$XMLPATTERNS_NAME|g" \
110 '{}' ';'
111
112#bbnote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
113 unset QMAKESPEC || true
114 ${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
115}
116
117EXPORT_FUNCTIONS do_configure
118
119addtask configure after do_unpack do_patch before do_compile
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
new file mode 100644
index 0000000000..850bb6a717
--- /dev/null
+++ b/meta/classes/qt4e.bbclass
@@ -0,0 +1,24 @@
1QT4EDEPENDS ?= "qt4-embedded "
2DEPENDS_prepend = "${QT4EDEPENDS}"
3
4inherit qmake2
5
6QT_BASE_NAME = "qt4-embedded"
7QT_DIR_NAME = "qtopia"
8QT_LIBINFIX = "E"
9# override variables set by qmake-base to compile Qt/Embedded apps
10#
11export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
12export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/qconfig.pri"
13export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
14export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
15export OE_QMAKE_LIBS_QT = "qt"
16export OE_QMAKE_LIBS_X11 = ""
17export OE_QMAKE_EXTRA_MODULES = "network"
18EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
19
20# Qt4 uses atomic instructions not supported in thumb mode
21ARM_INSTRUCTION_SET = "arm"
22
23# Qt4 can NOT be built on MIPS64 with a 64-bit userspace
24COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
new file mode 100644
index 0000000000..65d196afc6
--- /dev/null
+++ b/meta/classes/qt4x11.bbclass
@@ -0,0 +1,14 @@
1QT4DEPENDS ?= "qt4-x11 "
2DEPENDS_prepend = "${QT4DEPENDS}"
3
4inherit qmake2
5
6QT_BASE_NAME = "qt4"
7QT_DIR_NAME = "qt4"
8QT_LIBINFIX = ""
9
10# Qt4 uses atomic instructions not supported in thumb mode
11ARM_INSTRUCTION_SET = "arm"
12
13# Qt4 can NOT be built on MIPS64 with a 64-bit userspace
14COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
new file mode 100644
index 0000000000..5dd4624f40
--- /dev/null
+++ b/meta/classes/recipe_sanity.bbclass
@@ -0,0 +1,168 @@
1def __note(msg, d):
2 bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
3
4__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
5def bad_runtime_vars(cfgdata, d):
6 if bb.data.inherits_class("native", d) or \
7 bb.data.inherits_class("cross", d):
8 return
9
10 for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
11 val = d.getVar(var, 0)
12 if val and val != cfgdata.get(var):
13 __note("%s should be %s_${PN}" % (var, var), d)
14
15__recipe_sanity_reqvars = "DESCRIPTION"
16__recipe_sanity_reqdiffvars = ""
17def req_vars(cfgdata, d):
18 for var in d.getVar("__recipe_sanity_reqvars", True).split():
19 if not d.getVar(var, 0):
20 __note("%s should be set" % var, d)
21
22 for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
23 val = d.getVar(var, 0)
24 cfgval = cfgdata.get(var)
25
26 if not val:
27 __note("%s should be set" % var, d)
28 elif val == cfgval:
29 __note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
30
31def var_renames_overwrite(cfgdata, d):
32 renames = d.getVar("__recipe_sanity_renames", 0)
33 if renames:
34 for (key, newkey, oldvalue, newvalue) in renames:
35 if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
36 __note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
37
38def incorrect_nonempty_PACKAGES(cfgdata, d):
39 if bb.data.inherits_class("native", d) or \
40 bb.data.inherits_class("cross", d):
41 if d.getVar("PACKAGES", True):
42 return True
43
44def can_use_autotools_base(cfgdata, d):
45 cfg = d.getVar("do_configure", True)
46 if not bb.data.inherits_class("autotools", d):
47 return False
48
49 for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
50 if cfg.find(i) != -1:
51 return False
52
53 for clsfile in d.getVar("__inherit_cache", 0):
54 (base, _) = os.path.splitext(os.path.basename(clsfile))
55 if cfg.find("%s_do_configure" % base) != -1:
56 __note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
57
58 return True
59
60def can_delete_FILESPATH(cfgdata, d):
61 expected = cfgdata.get("FILESPATH")
62 #expected = "${@':'.join([os.path.normpath(os.path.join(fp, p, o)) for fp in d.getVar('FILESPATHBASE', True).split(':') for p in d.getVar('FILESPATHPKG', True).split(':') for o in (d.getVar('OVERRIDES', True) + ':').split(':') if os.path.exists(os.path.join(fp, p, o))])}:${FILESDIR}"
63 expectedpaths = d.expand(expected)
64 unexpanded = d.getVar("FILESPATH", 0)
65 filespath = d.getVar("FILESPATH", True).split(":")
66 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
67 for fp in filespath:
68 if not fp in expectedpaths:
69 # __note("Path %s in FILESPATH not in the expected paths %s" %
70 # (fp, expectedpaths), d)
71 return False
72 return expected != unexpanded
73
74def can_delete_FILESDIR(cfgdata, d):
75 expected = cfgdata.get("FILESDIR")
76 #expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
77 unexpanded = d.getVar("FILESDIR", 0)
78 if unexpanded is None:
79 return False
80
81 expanded = os.path.normpath(d.getVar("FILESDIR", True))
82 filespath = d.getVar("FILESPATH", True).split(":")
83 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
84
85 return unexpanded != expected and \
86 os.path.exists(expanded) and \
87 (expanded in filespath or
88 expanded == d.expand(expected))
89
90def can_delete_others(p, cfgdata, d):
91 for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
92 "SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
93 #for k in cfgdata:
94 unexpanded = d.getVar(k, 0)
95 cfgunexpanded = cfgdata.get(k)
96 if not cfgunexpanded:
97 continue
98
99 try:
100 expanded = d.getVar(k, True)
101 cfgexpanded = d.expand(cfgunexpanded)
102 except bb.fetch.ParameterError:
103 continue
104
105 if unexpanded != cfgunexpanded and \
106 cfgexpanded == expanded:
107 __note("candidate for removal of %s" % k, d)
108 bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
109 (p, cfgunexpanded, unexpanded, expanded))
110
111python do_recipe_sanity () {
112 p = d.getVar("P", True)
113 p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
114
115 sanitychecks = [
116 (can_delete_FILESDIR, "candidate for removal of FILESDIR"),
117 (can_delete_FILESPATH, "candidate for removal of FILESPATH"),
118 #(can_use_autotools_base, "candidate for use of autotools_base"),
119 (incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
120 ]
121 cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
122
123 for (func, msg) in sanitychecks:
124 if func(cfgdata, d):
125 __note(msg, d)
126
127 can_delete_others(p, cfgdata, d)
128 var_renames_overwrite(cfgdata, d)
129 req_vars(cfgdata, d)
130 bad_runtime_vars(cfgdata, d)
131}
132do_recipe_sanity[nostamp] = "1"
133addtask recipe_sanity
134
135do_recipe_sanity_all[nostamp] = "1"
136do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity_all do_recipe_sanity"
137do_recipe_sanity_all () {
138 :
139}
140addtask recipe_sanity_all after do_recipe_sanity
141
142python recipe_sanity_eh () {
143 d = e.data
144
145 cfgdata = {}
146 for k in d.keys():
147 #for k in ["S", "PR", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
148 # "SECTION"]:
149 cfgdata[k] = d.getVar(k, 0)
150
151 d.setVar("__recipe_sanity_cfgdata", cfgdata)
152 #d.setVar("__recipe_sanity_cfgdata", d)
153
154 # Sick, very sick..
155 from bb.data_smart import DataSmart
156 old = DataSmart.renameVar
157 def myrename(self, key, newkey):
158 oldvalue = self.getVar(newkey, 0)
159 old(self, key, newkey)
160 newvalue = self.getVar(newkey, 0)
161 if oldvalue:
162 renames = self.getVar("__recipe_sanity_renames", 0) or set()
163 renames.add((key, newkey, oldvalue, newvalue))
164 self.setVar("__recipe_sanity_renames", renames)
165 DataSmart.renameVar = myrename
166}
167addhandler recipe_sanity_eh
168recipe_sanity_eh[eventmask] = "bb.event.ConfigParsed"
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
new file mode 100644
index 0000000000..4ca9981f44
--- /dev/null
+++ b/meta/classes/relocatable.bbclass
@@ -0,0 +1,7 @@
1inherit chrpath
2
3SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
4
5python relocatable_binaries_preprocess() {
6 rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
7}
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
new file mode 100644
index 0000000000..479b38deb0
--- /dev/null
+++ b/meta/classes/report-error.bbclass
@@ -0,0 +1,66 @@
1#
2# Collects debug information in order to create error report files.
3#
4# Copyright (C) 2013 Intel Corporation
5# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
6#
7# Licensed under the MIT license, see COPYING.MIT for details
8
9ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
10
11def errorreport_getdata(e):
12 logpath = e.data.getVar('ERR_REPORT_DIR', True)
13 datafile = os.path.join(logpath, "error-report.txt")
14 with open(datafile) as f:
15 data = f.read()
16 return data
17
18def errorreport_savedata(e, newdata, file):
19 import json
20 logpath = e.data.getVar('ERR_REPORT_DIR', True)
21 bb.utils.mkdirhier(logpath)
22 datafile = os.path.join(logpath, file)
23 with open(datafile, "w") as f:
24 json.dump(newdata, f, indent=4, sort_keys=True)
25 return datafile
26
27python errorreport_handler () {
28 import json
29
30 if isinstance(e, bb.event.BuildStarted):
31 data = {}
32        machine = e.data.getVar("MACHINE", True)
33 data['machine'] = machine
34 data['build_sys'] = e.data.getVar("BUILD_SYS", True)
35        data['nativelsb'] = e.data.getVar("NATIVELSBSTRING", True)
36        data['distro'] = e.data.getVar("DISTRO", True)
37 data['target_sys'] = e.data.getVar("TARGET_SYS", True)
38 data['failures'] = []
39 data['component'] = e.getPkgs()[0]
40 data['branch_commit'] = base_detect_branch(e.data) + ": " + base_detect_revision(e.data)
41 errorreport_savedata(e, data, "error-report.txt")
42
43 elif isinstance(e, bb.build.TaskFailed):
44 task = e.task
45 taskdata={}
46 log = e.data.getVar('BB_LOGFILE', True)
47 logFile = open(log, 'r')
48 taskdata['package'] = e.data.expand("${PF}")
49 taskdata['task'] = task
50 taskdata['log'] = logFile.read()
51 logFile.close()
52 jsondata = json.loads(errorreport_getdata(e))
53 jsondata['failures'].append(taskdata)
54 errorreport_savedata(e, jsondata, "error-report.txt")
55
56 elif isinstance(e, bb.event.BuildCompleted):
57 jsondata = json.loads(errorreport_getdata(e))
58 failures = jsondata['failures']
59        if len(failures) > 0:
60            filename = "error_report_" + e.data.getVar("BUILDNAME", True) + ".txt"
61 datafile = errorreport_savedata(e, jsondata, filename)
62 bb.note("The errors of this build are stored in: %s. You can send the errors to an upstream server by running: send-error-report %s [server]" % (datafile, datafile))
63}
64
65addhandler errorreport_handler
66errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
new file mode 100644
index 0000000000..f0f6d18249
--- /dev/null
+++ b/meta/classes/rm_work.bbclass
@@ -0,0 +1,99 @@
1#
2# Removes source after build
3#
4# To use it, add this line to conf/local.conf:
5#
6# INHERIT += "rm_work"
7#
8# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE.
9# For example, in conf/local.conf:
10#
11# RM_WORK_EXCLUDE += "icu-native icu busybox"
12#
13
14# Use the completion scheduler by default when rm_work is active
15# to try and reduce disk usage
16BB_SCHEDULER ?= "completion"
17
18RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
19BB_DEFAULT_TASK = "rm_work_all"
20
21do_rm_work () {
22 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
23 for p in ${RM_WORK_EXCLUDE}; do
24 if [ "$p" = "${PN}" ]; then
25 bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE"
26 exit 0
27 fi
28 done
29
30 cd ${WORKDIR}
31 for dir in *
32 do
33 # Retain only logs and other files in temp, safely ignore
34	# failures of removing pseudo folders on NFS2/3 servers.
35 if [ $dir = 'pseudo' ]; then
36 rm -rf $dir 2> /dev/null || true
37 elif [ $dir != 'temp' ]; then
38 rm -rf $dir
39 fi
40 done
41
42	# Need to add pseudo back or subsequent work in this workdir
43 # might fail since setscene may not rerun to recreate it
44 mkdir -p ${WORKDIR}/pseudo/
45
46 # Change normal stamps into setscene stamps as they better reflect the
47 # fact WORKDIR is now empty
48 # Also leave noexec stamps since setscene stamps don't cover them
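	# e.g. (illustrative) a stamp ending in do_populate_sysroot.<sig> is
	# renamed to do_populate_sysroot_setscene.<sig> by the loop below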
49 cd `dirname ${STAMP}`
50 for i in `basename ${STAMP}`*
51 do
52 for j in ${SSTATETASKS}
53 do
54 case $i in
55 *do_setscene*)
56 break
57 ;;
58 *sigdata*)
59 i=dummy
60 break
61 ;;
62 *do_package_write*)
63 i=dummy
64 break
65 ;;
66 *do_build*)
67 i=dummy
68 break
69 ;;
70 # We remove do_package entirely, including any
71 # sstate version since otherwise we'd need to leave 'plaindirs' around
72 # such as 'packages' and 'packages-split' and these can be large. No end
73 # of chain tasks depend directly on do_package anymore.
74 *do_package|*do_package.*|*do_package_setscene.*)
75 rm -f $i;
76 i=dummy
77 break
78 ;;
79 *_setscene*)
80 i=dummy
81 break
82 ;;
83 *$j|*$j.*)
84 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
85 i=dummy
86 break
87 ;;
88 esac
89 done
90 rm -f $i
91 done
92}
93addtask rm_work after do_${RMWORK_ORIG_TASK}
94
95do_rm_work_all () {
96 :
97}
98do_rm_work_all[recrdeptask] = "do_rm_work"
99addtask rm_work_all after do_rm_work
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
new file mode 100644
index 0000000000..a42a472822
--- /dev/null
+++ b/meta/classes/rootfs_deb.bbclass
@@ -0,0 +1,24 @@
1#
2# Copyright 2006-2007 Openedhand Ltd.
3#
4
5ROOTFS_PKGMANAGE = "dpkg apt"
6ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
7
8do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
9do_rootfs[recrdeptask] += "do_package_write_deb"
10rootfs_deb_do_rootfs[vardepsexclude] += "BUILDNAME"
11do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
12
13do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
14
15python rootfs_deb_bad_recommendations() {
16 if d.getVar("BAD_RECOMMENDATIONS", True):
17 bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
18}
19do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
20
21DEB_POSTPROCESS_COMMANDS = ""
22
23opkglibdir = "${localstatedir}/lib/opkg"
24
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
new file mode 100644
index 0000000000..f5fef00166
--- /dev/null
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -0,0 +1,38 @@
1#
2# Creates a root filesystem out of IPKs
3#
4# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs image etc.
5# See image.bbclass for a usage of this.
6#
7
8EXTRAOPKGCONFIG ?= ""
9ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}"
10ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
11
12do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
13do_rootfs[recrdeptask] += "do_package_write_ipk"
14do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
15rootfs_ipk_do_rootfs[vardepsexclude] += "BUILDNAME"
16
17do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
18
19OPKG_PREPROCESS_COMMANDS = ""
20
21OPKG_POSTPROCESS_COMMANDS = ""
22
23OPKGLIBDIR = "${localstatedir}/lib"
24
25MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
26
27python () {
28
29 if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
30 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
31 flags = flags.replace("do_package_write_ipk", "")
32 flags = flags.replace("do_deploy", "")
33 flags = flags.replace("do_populate_sysroot", "")
34 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
35 d.setVar('OPKG_PREPROCESS_COMMANDS', "")
36 d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
37}
38
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
new file mode 100644
index 0000000000..4b02247d49
--- /dev/null
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -0,0 +1,42 @@
1#
2# Creates a root filesystem out of rpm packages
3#
4
5ROOTFS_PKGMANAGE = "rpm smartpm"
6ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
7
8# Add 50MB of extra space for Smart
9IMAGE_ROOTFS_EXTRA_SPACE_append = "${@base_contains("PACKAGE_INSTALL", "smartpm", " + 51200", "" ,d)}"
10
11# Smart is python based, so be sure python-native is available to us.
12EXTRANATIVEPATH += "python-native"
13
14do_rootfs[depends] += "rpm-native:do_populate_sysroot"
15do_rootfs[depends] += "rpmresolve-native:do_populate_sysroot"
16do_rootfs[depends] += "python-smartpm-native:do_populate_sysroot"
17
18# Needed for update-alternatives
19do_rootfs[depends] += "opkg-native:do_populate_sysroot"
20
21# Creating the repo info in do_rootfs
22do_rootfs[depends] += "createrepo-native:do_populate_sysroot"
23
24do_rootfs[recrdeptask] += "do_package_write_rpm"
25rootfs_rpm_do_rootfs[vardepsexclude] += "BUILDNAME"
26do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
27
28# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
29# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
30do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
31
32python () {
33 if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
34 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
35 flags = flags.replace("do_package_write_rpm", "")
36 flags = flags.replace("do_deploy", "")
37 flags = flags.replace("do_populate_sysroot", "")
38 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
39 d.setVar('RPM_PREPROCESS_COMMANDS', '')
40 d.setVar('RPM_POSTPROCESS_COMMANDS', '')
41
42}
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
new file mode 100644
index 0000000000..989bdcd7cd
--- /dev/null
+++ b/meta/classes/sanity.bbclass
@@ -0,0 +1,800 @@
1#
2# Sanity check the user's setup for common misconfigurations
3#
4
5SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
6 gzip gawk chrpath wget cpio perl"
7
8def bblayers_conf_file(d):
9 return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
10
11def sanity_conf_read(fn):
12 with open(fn, 'r') as f:
13 lines = f.readlines()
14 return lines
15
16def sanity_conf_find_line(pattern, lines):
17 import re
18 return next(((index, line)
19 for index, line in enumerate(lines)
20 if re.search(pattern, line)), (None, None))
21
22def sanity_conf_update(fn, lines, version_var_name, new_version):
23 index, line = sanity_conf_find_line(version_var_name, lines)
24 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
25 with open(fn, "w") as f:
26 f.write(''.join(lines))
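# Illustrative: a call like sanity_conf_update(fn, lines, 'LCONF_VERSION', 5)
# rewrites the matching line in the conf file to read: LCONF_VERSION = "5"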
27
28# Functions added to this variable MUST throw an exception (or sys.exit()) unless they
29# successfully changed LCONF_VERSION in bblayers.conf
30BBLAYERS_CONF_UPDATE_FUNCS += "oecore_update_bblayers"
31
32python oecore_update_bblayers() {
33 # bblayers.conf is out of date, so see if we can resolve that
34
35 current_lconf = int(d.getVar('LCONF_VERSION', True))
36 if not current_lconf:
37 sys.exit()
38 lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
39 lines = []
40
41 if current_lconf < 4:
42 sys.exit()
43
44 bblayers_fn = bblayers_conf_file(d)
45 lines = sanity_conf_read(bblayers_fn)
46
47 if current_lconf == 4 and lconf_version > 4:
48 topdir_var = '$' + '{TOPDIR}'
49 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
50 if bbpath_line:
51 start = bbpath_line.find('"')
52 if start != -1 and (len(bbpath_line) != (start + 1)):
53 if bbpath_line[start + 1] == '"':
54 lines[index] = (bbpath_line[:start + 1] +
55 topdir_var + bbpath_line[start + 1:])
56 else:
57 if not topdir_var in bbpath_line:
58 lines[index] = (bbpath_line[:start + 1] +
59 topdir_var + ':' + bbpath_line[start + 1:])
60 else:
61 sys.exit()
62 else:
63 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
64 if bbfiles_line:
65 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
66 else:
67 sys.exit()
68
69 current_lconf += 1
70 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
71 return
72
73 sys.exit()
74}
75
76def raise_sanity_error(msg, d, network_error=False):
77 if d.getVar("SANITY_USE_EVENTS", True) == "1":
78 try:
79 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
80 except TypeError:
81 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
82 return
83
84 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
85 Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
86 Following is the list of potential problems / advisories:
87
88 %s""" % msg)
89
90# Check a single tune for validity.
91def check_toolchain_tune(data, tune, multilib):
92 tune_errors = []
93 if not tune:
94 return "No tuning found for %s multilib." % multilib
95 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
96 features = (data.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
97 if not features:
98 return "Tuning '%s' has no defined features, and cannot be used." % tune
99 valid_tunes = data.getVarFlags('TUNEVALID') or {}
100 conflicts = data.getVarFlags('TUNECONFLICTS') or {}
101 # [doc] is the documentation for the variable, not a real feature
102 if 'doc' in valid_tunes:
103 del valid_tunes['doc']
104 if 'doc' in conflicts:
105 del conflicts['doc']
106 for feature in features:
107 if feature in conflicts:
108 for conflict in conflicts[feature].split():
109 if conflict in features:
110 tune_errors.append("Feature '%s' conflicts with '%s'." %
111 (feature, conflict))
112 if feature in valid_tunes:
113 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
114 else:
115 tune_errors.append("Feature '%s' is not defined." % feature)
116 whitelist = data.getVar("TUNEABI_WHITELIST", True) or ''
117 override = data.getVar("TUNEABI_OVERRIDE", True) or ''
118 if whitelist:
119 tuneabi = data.getVar("TUNEABI_tune-%s" % tune, True) or ''
120 if not tuneabi:
121 tuneabi = tune
122 if True not in [x in whitelist.split() for x in tuneabi.split()]:
123 tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
124 (tune, tuneabi))
125 if tune_errors:
126 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
127
128def check_toolchain(data):
129 tune_error_set = []
130 deftune = data.getVar("DEFAULTTUNE", True)
131 tune_errors = check_toolchain_tune(data, deftune, 'default')
132 if tune_errors:
133 tune_error_set.append(tune_errors)
134
135 multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
136 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split()
137
138 if multilibs:
139 seen_libs = []
140 seen_tunes = []
141 for lib in multilibs:
142 if lib in seen_libs:
143 tune_error_set.append("The multilib '%s' appears more than once." % lib)
144 else:
145 seen_libs.append(lib)
146 if not lib in global_multilibs:
147 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
148 tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
149 if tune in seen_tunes:
150 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
151 else:
152 seen_libs.append(tune)
153 if tune == deftune:
154 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
155 else:
156 tune_errors = check_toolchain_tune(data, tune, lib)
157 if tune_errors:
158 tune_error_set.append(tune_errors)
159 if tune_error_set:
160 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
161
162 return ""
163
164def check_conf_exists(fn, data):
165 bbpath = []
166 fn = data.expand(fn)
167 vbbpath = data.getVar("BBPATH")
168 if vbbpath:
169 bbpath += vbbpath.split(":")
170 for p in bbpath:
171 currname = os.path.join(data.expand(p), fn)
172 if os.access(currname, os.R_OK):
173 return True
174 return False
175
176def check_create_long_filename(filepath, pathname):
177 import string, random
178 testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
179 try:
180 if not os.path.exists(filepath):
181 bb.utils.mkdirhier(filepath)
182 f = open(testfile, "w")
183 f.close()
184 os.remove(testfile)
185 except IOError as e:
186 import errno
187 err, strerror = e.args
188 if err == errno.ENAMETOOLONG:
189 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
190 else:
191 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
192 except OSError as e:
193 errno, strerror = e.args
194 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
195 return ""
196
197def check_path_length(filepath, pathname, limit):
198 if len(filepath) > limit:
199        return "The length of %s is longer than %d, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
200 return ""
201
202def get_filesystem_id(path):
203 status, result = oe.utils.getstatusoutput("stat -f -c '%s' %s" % ("%t", path))
204 if status == 0:
205 return result
206 else:
207 bb.warn("Can't get the filesystem id of: %s" % path)
208 return None
209
210# Check that the path isn't located on nfs.
211def check_not_nfs(path, name):
212    # The NFS filesystem id is 6969
213 if get_filesystem_id(path) == "6969":
214 return "The %s: %s can't be located on nfs.\n" % (name, path)
215 return ""
216
217def check_connectivity(d):
218 # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable
219 # using the same syntax as for SRC_URI. If the variable is not set
220 # the check is skipped
221 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
222 retval = ""
223
224 # Only check connectivity if network enabled and the
225 # CONNECTIVITY_CHECK_URIS are set
226 network_enabled = not d.getVar('BB_NO_NETWORK', True)
227 check_enabled = len(test_uris)
228    # Take a copy of the data store and unset MIRRORS and PREMIRRORS
229 data = bb.data.createCopy(d)
230 data.delVar('PREMIRRORS')
231 data.delVar('MIRRORS')
232 if check_enabled and network_enabled:
233 try:
234 fetcher = bb.fetch2.Fetch(test_uris, data)
235 fetcher.checkstatus()
236 except Exception:
237 # Allow the message to be configured so that users can be
238 # pointed to a support mechanism.
239 msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
240 if len(msg) == 0:
241 msg = "Failed to fetch test data from the network. Please ensure your network is configured correctly.\n"
242 retval = msg
243
244 return retval
245
246def check_supported_distro(sanity_data):
247 from fnmatch import fnmatch
248
249 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True)
250 if not tested_distros:
251 return
252
253 try:
254 distro = oe.lsb.distro_identifier()
255 except Exception:
256 distro = None
257
258 if not distro:
259 bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
260
261 for supported in [x.strip() for x in tested_distros.split('\\n')]:
262 if fnmatch(distro, supported):
263 return
264
265 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
266
267# Checks we should only make if MACHINE is set correctly
268def check_sanity_validmachine(sanity_data):
269 messages = ""
270
271 # Check TUNE_ARCH is set
272 if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID':
273 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
274
275 # Check TARGET_OS is set
276 if sanity_data.getVar('TARGET_OS', True) == 'INVALID':
277 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
278
279 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
280 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
281 tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
282 tunefound = False
283 seen = {}
284 dups = []
285
286 for pa in pkgarchs.split():
287 if seen.get(pa, 0) == 1:
288 dups.append(pa)
289 else:
290 seen[pa] = 1
291 if pa == tunepkg:
292 tunefound = True
293
294 if len(dups):
295 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
296
297    if not tunefound:
298 messages = messages + "Error, the PACKAGE_ARCHS variable does not contain TUNE_PKGARCH (%s)." % tunepkg
299
300 return messages
301
302# Checks if necessary to add option march to host gcc
303def check_gcc_march(sanity_data):
304 result = True
305 message = ""
306
307 # Check if -march not in BUILD_CFLAGS
308 if sanity_data.getVar("BUILD_CFLAGS",True).find("-march") < 0:
309 result = False
310
311 # Construct a test file
312 f = open("gcc_test.c", "w")
313 f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n")
314 f.close()
315
316 # Check if GCC could work without march
317 if not result:
318 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc gcc_test.c -o gcc_test")
319 if status == 0:
320            result = True
321
322 if not result:
323 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=native gcc_test.c -o gcc_test")
324 if status == 0:
325 message = "BUILD_CFLAGS_append = \" -march=native\""
326            result = True
327
328 if not result:
329 build_arch = sanity_data.getVar('BUILD_ARCH', True)
330 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=%s gcc_test.c -o gcc_test" % build_arch)
331 if status == 0:
332 message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
333            result = True
334
335 os.remove("gcc_test.c")
336 if os.path.exists("gcc_test"):
337 os.remove("gcc_test")
338
339 return (result, message)
340
341# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
342# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
343def check_make_version(sanity_data):
344 from distutils.version import LooseVersion
345 status, result = oe.utils.getstatusoutput("make --version")
346 if status != 0:
347 return "Unable to execute make --version, exit code %s\n" % status
348 version = result.split()[2]
349 if LooseVersion(version) == LooseVersion("3.82"):
350 # Construct a test file
351 f = open("makefile_test", "w")
352 f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
353 f.write("\n")
354 f.write("makefile_test_a.c:\n")
355        f.write("\ttouch $@\n")
356 f.write("\n")
357 f.write("makefile_test_b.c:\n")
358        f.write("\ttouch $@\n")
359 f.close()
360
361 # Check if make 3.82 has been patched
362 status,result = oe.utils.getstatusoutput("make -f makefile_test")
363
364 os.remove("makefile_test")
365 if os.path.exists("makefile_test_a.c"):
366 os.remove("makefile_test_a.c")
367 if os.path.exists("makefile_test_b.c"):
368 os.remove("makefile_test_b.c")
369 if os.path.exists("makefile_test.a"):
370 os.remove("makefile_test.a")
371
372 if status != 0:
373 return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
374 return None
375
376
377# Tar version 1.24 and onwards handle overwriting symlinks correctly
378# but earlier versions do not; this needs to work properly for sstate
379def check_tar_version(sanity_data):
380 from distutils.version import LooseVersion
381 status, result = oe.utils.getstatusoutput("tar --version")
382 if status != 0:
383 return "Unable to execute tar --version, exit code %s\n" % status
384 version = result.split()[3]
385 if LooseVersion(version) < LooseVersion("1.24"):
386 return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
387 return None
388
389# We use git parameters and functionality only found in 1.7.5 or later
390def check_git_version(sanity_data):
391 from distutils.version import LooseVersion
392 status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
393 if status != 0:
394 return "Unable to execute git --version, exit code %s\n" % status
395 version = result.split()[2]
396 if LooseVersion(version) < LooseVersion("1.7.5"):
397 return "Your version of git is older than 1.7.5 and has bugs which will break builds. Please install a newer version of git.\n"
398 return None
399
400# Check the required perl modules which may not be installed by default
401def check_perl_modules(sanity_data):
402 ret = ""
403 modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
404 for m in modules:
405 status, result = oe.utils.getstatusoutput("perl -e 'use %s' 2> /dev/null" % m)
406 if status != 0:
407 ret += "%s " % m
408 if ret:
409 return "Required perl module(s) not found: %s\n" % ret
410 return None
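# The same probe can be run by hand (illustrative one-liner):
#   perl -e 'use Thread::Queue' 2>/dev/null && echo present || echo missing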
411
412def sanity_check_conffiles(status, d):
413 # Check we are using a valid local.conf
414 current_conf = d.getVar('CONF_VERSION', True)
415 conf_version = d.getVar('LOCALCONF_VERSION', True)
416
417 if current_conf != conf_version:
418 status.addresult("Your version of local.conf was generated from an older/newer version of local.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/local.conf ${COREBASE}/meta*/conf/local.conf.sample\" is a good way to visualise the changes.\n")
419
420 # Check bblayers.conf is valid
421 current_lconf = d.getVar('LCONF_VERSION', True)
422 lconf_version = d.getVar('LAYER_CONF_VERSION', True)
423 if current_lconf != lconf_version:
424 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
425 for func in funcs:
426 success = True
427 try:
428 bb.build.exec_func(func, d)
429 except Exception:
430 success = False
431 if success:
432 bb.note("Your conf/bblayers.conf has been automatically updated.")
433 status.reparse = True
434 break
435 if not status.reparse:
436            status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version))
437
438 # If we have a site.conf, check it's valid
439 if check_conf_exists("conf/site.conf", d):
440 current_sconf = d.getVar('SCONF_VERSION', True)
441 sconf_version = d.getVar('SITE_CONF_VERSION', True)
442 if current_sconf != sconf_version:
443 status.addresult("Your version of site.conf was generated from an older version of site.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/site.conf ${COREBASE}/meta*/conf/site.conf.sample\" is a good way to visualise the changes.\n")
444
445
446def sanity_handle_abichanges(status, d):
447 #
448 # Check the 'ABI' of TMPDIR
449 #
450 current_abi = d.getVar('OELAYOUT_ABI', True)
451 abifile = d.getVar('SANITY_ABIFILE', True)
452 if os.path.exists(abifile):
453 with open(abifile, "r") as f:
454 abi = f.read().strip()
455 if not abi.isdigit():
456 with open(abifile, "w") as f:
457 f.write(current_abi)
458 elif abi == "2" and current_abi == "3":
459 bb.note("Converting staging from layout version 2 to layout version 3")
460 subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True)
461 subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True)
462 subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True)
463 with open(abifile, "w") as f:
464 f.write(current_abi)
465 elif abi == "3" and current_abi == "4":
466 bb.note("Converting staging layout from version 3 to layout version 4")
467 if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
468 subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True)
469 subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True)
470 with open(abifile, "w") as f:
471 f.write(current_abi)
472 elif abi == "4":
473 status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n")
474 elif abi == "5" and current_abi == "6":
475 bb.note("Converting staging layout from version 5 to layout version 6")
476 subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True)
477 with open(abifile, "w") as f:
478 f.write(current_abi)
479 elif abi == "7" and current_abi == "8":
480 status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
481 elif (abi != current_abi and current_abi == "9"):
482        status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will still be valid and reused)\n")
483 elif (abi != current_abi):
484 # Code to convert from one ABI to another could go here if possible.
485 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
486 else:
487 with open(abifile, "w") as f:
488 f.write(current_abi)
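# A sketch of the mechanism: SANITY_ABIFILE holds a single integer, e.g.
#   $ cat ${TMPDIR}/abi_version
#   9
# (the path shown is illustrative). Comparing that stored value against
# OELAYOUT_ABI is what selects the migration or error branches above.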
489
490def check_sanity_sstate_dir_change(sstate_dir, data):
491 # Sanity checks to be done when the value of SSTATE_DIR changes
492
493 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
494 testmsg = ""
495 if sstate_dir != "":
496 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
497 # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
498 try:
499 err = testmsg.split(': ')[1].strip()
500 if err == "Permission denied.":
501 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
502 except IndexError:
503 pass
504 return testmsg
505
506def check_sanity_version_change(status, d):
507 # Sanity checks to be done when SANITY_VERSION changes
508 # In other words, these tests run once in a given build directory and then
509 # never again until the sanity version changes.
510
511    # Check the python install is complete. glib-2.0-native requires
512 # xml.parsers.expat
513 try:
514 import xml.parsers.expat
515 except ImportError:
516 status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
517
518 status.addresult(check_make_version(d))
519 status.addresult(check_tar_version(d))
520 status.addresult(check_git_version(d))
521 status.addresult(check_perl_modules(d))
522
523 missing = ""
524
525 if not check_app_exists("${MAKE}", d):
526 missing = missing + "GNU make,"
527
528 if not check_app_exists('${BUILD_PREFIX}gcc', d):
529 missing = missing + "C Compiler (%sgcc)," % d.getVar("BUILD_PREFIX", True)
530
531 if not check_app_exists('${BUILD_PREFIX}g++', d):
532 missing = missing + "C++ Compiler (%sg++)," % d.getVar("BUILD_PREFIX", True)
533
534 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
535
536 for util in required_utilities.split():
537 if not check_app_exists(util, d):
538 missing = missing + "%s," % util
539
540 if missing:
541 missing = missing.rstrip(',')
542 status.addresult("Please install the following missing utilities: %s\n" % missing)
543
544 assume_provided = d.getVar('ASSUME_PROVIDED', True).split()
545 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
546 if "diffstat-native" not in assume_provided:
547 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
548
549 if "qemu-native" in assume_provided:
550 if not check_app_exists("qemu-arm", d):
551 status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH")
552
553 (result, message) = check_gcc_march(d)
554 if result and message:
555        status.addresult("Your gcc version is older than 4.5; please add the following parameter to local.conf:\n \
556 %s\n" % message)
557 if not result:
558 status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build")
559 status.addresult(" and link something that uses atomic operations, such as: \n")
560 status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
561
562 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
563 tmpdir = d.getVar('TMPDIR', True)
564 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
565
566 # Some third-party software apparently relies on chmod etc. being suid root (!!)
567 import stat
568 suid_check_bins = "chown chmod mknod".split()
569 for bin_cmd in suid_check_bins:
570 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
571 if bin_path:
572 bin_stat = os.stat(bin_path)
573 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
574 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
575
576 # Check that we can fetch from various network transports
577 netcheck = check_connectivity(d)
578 status.addresult(netcheck)
579 if netcheck:
580 status.network_error = True
581
582 nolibs = d.getVar('NO32LIBS', True)
583 if not nolibs:
584 lib32path = '/lib'
585 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
586 lib32path = '/lib32'
587
588 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
589 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
590
591 bbpaths = d.getVar('BBPATH', True).split(":")
592 if ("." in bbpaths or "" in bbpaths) and not status.reparse:
593 status.addresult("BBPATH references the current directory, either through " \
594 "an empty entry, or a '.'.\n\t This is unsafe and means your "\
595 "layer configuration is adding empty elements to BBPATH.\n\t "\
596 "Please check your layer.conf files and other BBPATH " \
597 "settings to remove the current working directory " \
598 "references.\n" \
599            "Parsed BBPATH is " + str(bbpaths))
600
601 oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF', True)
602 if not oes_bb_conf:
603 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
604
605 # The length of TMPDIR can't be longer than 410
606 status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
607
608 # Check that TMPDIR isn't located on nfs
609 status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
610
611def check_sanity_everybuild(status, d):
612    # Sanity tests which test the user's environment and so need to run at each build (or are so cheap
613    # that it makes sense to always run them).
614
615 if 0 == os.getuid():
616 raise_sanity_error("Do not use Bitbake as root.", d)
617
618 # Check the Python version, we now have a minimum of Python 2.7.3
619 import sys
620 if sys.hexversion < 0x020703F0:
621 status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n')
622
623 # Check the bitbake version meets minimum requirements
624 from distutils.version import LooseVersion
625 minversion = d.getVar('BB_MIN_VERSION', True)
626 if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
627 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
628
629 sanity_check_conffiles(status, d)
630
631 paths = d.getVar('PATH', True).split(":")
632 if "." in paths or "" in paths:
633 status.addresult("PATH contains '.' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
634
635 # Check that the DISTRO is valid, if set
636    # need to take into account the possibility of DISTRO being renamed
637 distro = d.getVar('DISTRO', True)
638 if distro and distro != "nodistro":
639 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
640 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True))
641
642 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
643 # set, since so much relies on it being set.
644 dldir = d.getVar('DL_DIR', True)
645 if not dldir:
646 status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
647    elif os.path.exists(dldir) and not os.access(dldir, os.W_OK):
648 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
649
650 # Check that the MACHINE is valid, if it is set
651 machinevalid = True
652 if d.getVar('MACHINE', True):
653 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
654 status.addresult('Please set a valid MACHINE in your local.conf or environment\n')
655 machinevalid = False
656 else:
657 status.addresult(check_sanity_validmachine(d))
658 else:
659 status.addresult('Please set a MACHINE in your local.conf or environment\n')
660 machinevalid = False
661 if machinevalid:
662 status.addresult(check_toolchain(d))
663
664 # Check that the SDKMACHINE is valid, if it is set
665 if d.getVar('SDKMACHINE', True):
666 if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
667 status.addresult('Specified SDKMACHINE value is not valid\n')
668 elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
669 status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
670
671 check_supported_distro(d)
672
673 # Check if DISPLAY is set if TEST_IMAGE is set
674 if d.getVar('TEST_IMAGE', True) == '1' or d.getVar('DEFAULT_TEST_SUITES', True):
675 testtarget = d.getVar('TEST_TARGET', True)
676 if testtarget == 'qemu' or testtarget == 'QemuTarget':
677 display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True)
678 if not display:
679 status.addresult('testimage needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n')
680
681 omask = os.umask(022)
682 if omask & 0755:
683 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
684 os.umask(omask)
685
686 if d.getVar('TARGET_ARCH', True) == "arm":
687 # This path is no longer user-readable in modern (very recent) Linux
688 try:
689 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
690 f = open("/proc/sys/vm/mmap_min_addr", "r")
691 try:
692 if (int(f.read().strip()) > 65536):
693 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
694 finally:
695 f.close()
696 except:
697 pass
698
699 oeroot = d.getVar('COREBASE', True)
700 if oeroot.find('+') != -1:
701 status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
702 if oeroot.find('@') != -1:
703 status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
704 if oeroot.find(' ') != -1:
705 status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
706
707 # Check that TMPDIR hasn't changed location since the last time we were run
708 tmpdir = d.getVar('TMPDIR', True)
709 checkfile = os.path.join(tmpdir, "saved_tmpdir")
710 if os.path.exists(checkfile):
711 with open(checkfile, "r") as f:
712 saved_tmpdir = f.read().strip()
713 if (saved_tmpdir != tmpdir):
714 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
715 else:
716 bb.utils.mkdirhier(tmpdir)
717 with open(checkfile, "w") as f:
718 f.write(tmpdir)
719
720def check_sanity(sanity_data):
721 import subprocess
722
723 class SanityStatus(object):
724 def __init__(self):
725 self.messages = ""
726 self.network_error = False
727 self.reparse = False
728
729 def addresult(self, message):
730 if message:
731 self.messages = self.messages + message
732
733 status = SanityStatus()
734
735 tmpdir = sanity_data.getVar('TMPDIR', True)
736 sstate_dir = sanity_data.getVar('SSTATE_DIR', True)
737
738 # Check saved sanity info
739 last_sanity_version = 0
740 last_tmpdir = ""
741 last_sstate_dir = ""
742 sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
743 if os.path.exists(sanityverfile):
744 with open(sanityverfile, 'r') as f:
745 for line in f:
746 if line.startswith('SANITY_VERSION'):
747 last_sanity_version = int(line.split()[1])
748 if line.startswith('TMPDIR'):
749 last_tmpdir = line.split()[1]
750 if line.startswith('SSTATE_DIR'):
751 last_sstate_dir = line.split()[1]
752
753 check_sanity_everybuild(status, sanity_data)
754
755 sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
756 network_error = False
757 if last_sanity_version < sanity_version:
758 check_sanity_version_change(status, sanity_data)
759 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
760 else:
761 if last_sstate_dir != sstate_dir:
762 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
763
764 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
765 with open(sanityverfile, 'w') as f:
766 f.write("SANITY_VERSION %s\n" % sanity_version)
767 f.write("TMPDIR %s\n" % tmpdir)
768 f.write("SSTATE_DIR %s\n" % sstate_dir)
769
770 sanity_handle_abichanges(status, sanity_data)
771
772 if status.messages != "":
773 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
774 return status.reparse
775
776# Create a copy of the datastore and finalise it to ensure appends and
777# overrides are set - the datastore has yet to be finalised at ConfigParsed
778def copy_data(e):
779 sanity_data = bb.data.createCopy(e.data)
780 sanity_data.finalize()
781 return sanity_data
782
783addhandler check_sanity_eventhandler
784check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
785python check_sanity_eventhandler() {
786 if bb.event.getName(e) == "SanityCheck":
787 sanity_data = copy_data(e)
788 if e.generateevents:
789 sanity_data.setVar("SANITY_USE_EVENTS", "1")
790 reparse = check_sanity(sanity_data)
791 e.data.setVar("BB_INVALIDCONF", reparse)
792 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
793 elif bb.event.getName(e) == "NetworkTest":
794 sanity_data = copy_data(e)
795 if e.generateevents:
796 sanity_data.setVar("SANITY_USE_EVENTS", "1")
797 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
798
799 return
800}
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
new file mode 100644
index 0000000000..fc0f26b17b
--- /dev/null
+++ b/meta/classes/scons.bbclass
@@ -0,0 +1,15 @@
1DEPENDS += "python-scons-native"
2
3EXTRA_OESCONS ?= ""
4
5scons_do_compile() {
6 ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
7 bbfatal "scons build execution failed."
8}
9
10scons_do_install() {
11	${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS} || \
12 bbfatal "scons install execution failed."
13}
14
15EXPORT_FUNCTIONS do_compile do_install
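# A minimal recipe using this class might look like the following (names and
# values below are illustrative, not defaults):
#   inherit scons
#   EXTRA_OESCONS = "WITH_FOO=1"
# do_compile and do_install then invoke scons with PREFIX/prefix set as above.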
diff --git a/meta/classes/sdl.bbclass b/meta/classes/sdl.bbclass
new file mode 100644
index 0000000000..cc31288f61
--- /dev/null
+++ b/meta/classes/sdl.bbclass
@@ -0,0 +1,6 @@
1#
2# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
3#
4
5DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
6SECTION = "x11/games"
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
new file mode 100644
index 0000000000..56343b1c73
--- /dev/null
+++ b/meta/classes/setuptools.bbclass
@@ -0,0 +1,8 @@
1inherit distutils
2
3DEPENDS += "python-distribute-native"
4
5DISTUTILS_INSTALL_ARGS = "--root=${D} \
6 --prefix=${prefix} \
7 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
8 --install-data=${datadir}"
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
new file mode 100644
index 0000000000..40c18c8976
--- /dev/null
+++ b/meta/classes/setuptools3.bbclass
@@ -0,0 +1,8 @@
1inherit distutils3
2
3DEPENDS += "python3-distribute-native"
4
5DISTUTILS_INSTALL_ARGS = "--root=${D} \
6 --prefix=${prefix} \
7 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
8 --install-data=${datadir}"
diff --git a/meta/classes/sip.bbclass b/meta/classes/sip.bbclass
new file mode 100644
index 0000000000..711f851593
--- /dev/null
+++ b/meta/classes/sip.bbclass
@@ -0,0 +1,63 @@
1# Build Class for Sip based Python Bindings
2# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
3#
4STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip"
5
6DEPENDS =+ "sip-native"
7RDEPENDS_${PN} += "python-sip"
8
9# Default tags, shown for reference only - set EXTRA_SIPTAGS in the recipe rather than uncommenting:
10# EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"
11
12# do_generate is before do_configure so ensure that sip_native is populated in sysroot before executing it
13do_generate[depends] += "sip-native:do_populate_sysroot"
14
15sip_do_generate() {
16 if [ -z "${SIP_MODULES}" ]; then
17 MODULES="`ls sip/*mod.sip`"
18 else
19 MODULES="${SIP_MODULES}"
20 fi
21
22 if [ -z "$MODULES" ]; then
23 die "SIP_MODULES not set and no modules found in $PWD"
24 else
25 bbnote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'"
26 fi
27
28 if [ -z "${EXTRA_SIPTAGS}" ]; then
29 die "EXTRA_SIPTAGS needs to be set!"
30 else
31 SIPTAGS="${EXTRA_SIPTAGS}"
32 fi
33
34 if [ ! -z "${SIP_FEATURES}" ]; then
35 FEATURES="-z ${SIP_FEATURES}"
36 bbnote "sip feature file: ${SIP_FEATURES}"
37 fi
38
39 for module in $MODULES
40 do
41 install -d ${module}/
42        echo "calling 'sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf sip/${module}/${module}mod.sip'"
43 sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf \
44 sip/${module}/${module}mod.sip || die "Error calling sip on ${module}"
45 cat ${module}/${module}.sbf | sed s,target,TARGET, \
46 | sed s,sources,SOURCES, \
47 | sed s,headers,HEADERS, \
48 | sed s,"moc_HEADERS =","HEADERS +=", \
49 >${module}/${module}.pro
50 echo "TEMPLATE=lib" >>${module}/${module}.pro
51 [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro
52 [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro
53 [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro
54 [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro
55 [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro
56 [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro
57 true
58 done
59}
60
61EXPORT_FUNCTIONS do_generate
62
63addtask generate after do_unpack do_patch before do_configure
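# Illustrative usage (the tag and module values are examples, not defaults):
#   inherit sip
#   EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"
#   SIP_MODULES = "qt qtcanvas"
# do_generate then emits one qmake .pro file per module before do_configure runs.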
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
new file mode 100644
index 0000000000..9a4d03b887
--- /dev/null
+++ b/meta/classes/siteconfig.bbclass
@@ -0,0 +1,33 @@
1python siteconfig_do_siteconfig () {
2 shared_state = sstate_state_fromvars(d)
3 if shared_state['task'] != 'populate_sysroot':
4 return
5 if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
6 bb.debug(1, "No site_config directory, skipping do_siteconfig")
7 return
8 bb.build.exec_func('do_siteconfig_gencache', d)
9 sstate_clean(shared_state, d)
10 sstate_install(shared_state, d)
11}
12
13EXTRASITECONFIG ?= ""
14
15siteconfig_do_siteconfig_gencache () {
16 mkdir -p ${WORKDIR}/site_config_${MACHINE}
17 gen-site-config ${FILE_DIRNAME}/site_config \
18 >${WORKDIR}/site_config_${MACHINE}/configure.ac
19 cd ${WORKDIR}/site_config_${MACHINE}
20 autoconf
21 rm -f ${PN}_cache
22 CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${PN}_cache
23 sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
24 -e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
25 < ${PN}_cache > ${PN}_config
26 mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
27 cp ${PN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
28
29}
30
31do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
32
33EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache
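# The ${PN}_config cache fragments generated above land in
# ${datadir}/${TARGET_SYS}_config_site.d within the sysroot, which is where
# siteinfo.bbclass (via SITECONFIG_SYSROOTCACHE) picks them up for later
# configure runs.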
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
new file mode 100644
index 0000000000..9ae2561c88
--- /dev/null
+++ b/meta/classes/siteinfo.bbclass
@@ -0,0 +1,151 @@
1# This class exists to provide information about the targets that
2# may be needed by other classes and/or recipes. If you add a new
3# target this will probably need to be updated.
4
5#
6# Returns information about 'what' for the named target 'target'
7# where 'target' == "<arch>-<os>"
8#
9# 'what' can be one of
10# * target: Returns the target name ("<arch>-<os>")
11# * endianness: Returns "be" for big-endian targets, "le" for little-endian
12# * bits: Returns the bit size of the target, either "32" or "64"
13# * libc: Returns the name of the C library used by the target
14#
15# It is an error for the target not to exist.
16# If 'what' doesn't exist then an empty value is returned
17#
18def siteinfo_data(d):
19 archinfo = {
20 "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
21 "aarch64": "endian-little bit-64 arm-common",
22 "aarch64_be": "endian-big bit-64 arm-common",
23 "arm": "endian-little bit-32 arm-common",
24 "armeb": "endian-big bit-32 arm-common",
25 "avr32": "endian-big bit-32 avr32-common",
26 "bfin": "endian-little bit-32 bfin-common",
27 "i386": "endian-little bit-32 ix86-common",
28 "i486": "endian-little bit-32 ix86-common",
29 "i586": "endian-little bit-32 ix86-common",
30 "i686": "endian-little bit-32 ix86-common",
31 "ia64": "endian-little bit-64",
32 "microblaze": "endian-big bit-32 microblaze-common",
33 "microblazeel": "endian-little bit-32 microblaze-common",
34 "mips": "endian-big bit-32 mips-common",
35 "mips64": "endian-big bit-64 mips-common",
36 "mips64el": "endian-little bit-64 mips-common",
37 "mipsel": "endian-little bit-32 mips-common",
38 "powerpc": "endian-big bit-32 powerpc-common",
39 "nios2": "endian-little bit-32 nios2-common",
40 "powerpc64": "endian-big bit-64 powerpc-common",
41 "ppc": "endian-big bit-32 powerpc-common",
42 "ppc64": "endian-big bit-64 powerpc-common",
43 "sh3": "endian-little bit-32 sh-common",
44 "sh4": "endian-little bit-32 sh-common",
45 "sparc": "endian-big bit-32",
46 "viac3": "endian-little bit-32 ix86-common",
47 "x86_64": "endian-little", # bitinfo specified in targetinfo
48 }
49 osinfo = {
50 "darwin": "common-darwin",
51 "darwin9": "common-darwin",
52 "linux": "common-linux common-glibc",
53 "linux-gnu": "common-linux common-glibc",
54 "linux-gnux32": "common-linux common-glibc",
55 "linux-gnun32": "common-linux common-glibc",
56 "linux-gnueabi": "common-linux common-glibc",
57 "linux-gnuspe": "common-linux common-glibc",
58 "linux-uclibc": "common-linux common-uclibc",
59 "linux-uclibceabi": "common-linux common-uclibc",
60 "linux-uclibcspe": "common-linux common-uclibc",
61 "uclinux-uclibc": "common-uclibc",
62 "cygwin": "common-cygwin",
63 "mingw32": "common-mingw",
64 }
65 targetinfo = {
66 "aarch64-linux-gnu": "aarch64-linux",
67 "aarch64_be-linux-gnu": "aarch64_be-linux",
68 "arm-linux-gnueabi": "arm-linux",
69 "arm-linux-uclibceabi": "arm-linux-uclibc",
70 "armeb-linux-gnueabi": "armeb-linux",
71 "armeb-linux-uclibceabi": "armeb-linux-uclibc",
72 "mips64-linux-gnun32": "mips-linux bit-32",
73 "mips64el-linux-gnun32": "mipsel-linux bit-32",
74 "powerpc-linux": "powerpc32-linux",
75 "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
76 "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
77 "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
78 "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
79 "powerpc64-linux": "powerpc-linux",
80 "x86_64-cygwin": "bit-64",
81 "x86_64-darwin": "bit-64",
82 "x86_64-darwin9": "bit-64",
83 "x86_64-linux": "bit-64",
84 "x86_64-linux-uclibc": "bit-64",
85 "x86_64-linux-gnu": "bit-64 x86_64-linux",
86 "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
87 "x86_64-mingw32": "bit-64",
88 }
89
90 hostarch = d.getVar("HOST_ARCH", True)
91 hostos = d.getVar("HOST_OS", True)
92 target = "%s-%s" % (hostarch, hostos)
93
94 sitedata = []
95 if hostarch in archinfo:
96 sitedata.extend(archinfo[hostarch].split())
97 if hostos in osinfo:
98 sitedata.extend(osinfo[hostos].split())
99 if target in targetinfo:
100 sitedata.extend(targetinfo[target].split())
101 sitedata.append(target)
102 sitedata.append("common")
103
104    bb.debug(1, "SITE files %s" % sitedata)
105 return sitedata
106
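# Worked example (a sketch; the exact lists come from the tables above): with
# HOST_ARCH = "x86_64" and HOST_OS = "linux-gnu", siteinfo_data(d) returns
#   ['endian-little', 'common-linux', 'common-glibc',
#    'bit-64', 'x86_64-linux', 'x86_64-linux-gnu', 'common']
# which the anonymous python below maps to SITEINFO_ENDIANNESS = "le" and
# SITEINFO_BITS = "64".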
107python () {
108 sitedata = set(siteinfo_data(d))
109 if "endian-little" in sitedata:
110 d.setVar("SITEINFO_ENDIANNESS", "le")
111 elif "endian-big" in sitedata:
112 d.setVar("SITEINFO_ENDIANNESS", "be")
113 else:
114 bb.error("Unable to determine endianness for architecture '%s'" %
115 d.getVar("HOST_ARCH", True))
116 bb.fatal("Please add your architecture to siteinfo.bbclass")
117
118 if "bit-32" in sitedata:
119 d.setVar("SITEINFO_BITS", "32")
120 elif "bit-64" in sitedata:
121 d.setVar("SITEINFO_BITS", "64")
122 else:
123 bb.error("Unable to determine bit size for architecture '%s'" %
124 d.getVar("HOST_ARCH", True))
125 bb.fatal("Please add your architecture to siteinfo.bbclass")
126}
127
128def siteinfo_get_files(d, no_cache = False):
129 sitedata = siteinfo_data(d)
130 sitefiles = ""
131 for path in d.getVar("BBPATH", True).split(":"):
132 for element in sitedata:
133 filename = os.path.join(path, "site", element)
134 if os.path.exists(filename):
135 sitefiles += filename + " "
136
137 if no_cache: return sitefiles
138
139 # Now check for siteconfig cache files
140 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True)
141 if os.path.isdir(path_siteconfig):
142 for i in os.listdir(path_siteconfig):
143 filename = os.path.join(path_siteconfig, i)
144 sitefiles += filename + " "
145
146 return sitefiles
147
148#
149# Make some information available via variables
150#
151SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
new file mode 100644
index 0000000000..55ce3aff4f
--- /dev/null
+++ b/meta/classes/spdx.bbclass
@@ -0,0 +1,321 @@
1# This class integrates real-time license scanning, generation of SPDX standard
2# output and verifying license info during the build process.
3# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
4#
5# For more information on FOSSology:
6# http://www.fossology.org
7#
8# For more information on FOSSologySPDX commandline:
9# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
10#
11# For more information on SPDX:
12# http://www.spdx.org
13#
14
15# The SPDX file will be output to the path defined as [SPDX_MANIFEST_DIR]
16# in ./meta/conf/licenses.conf.
17
18SPDXOUTPUTDIR = "${WORKDIR}/spdx_output_dir"
19SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
20
21python do_spdx () {
22 import os, sys
23 import json
24
25 info = {}
26 info['workdir'] = (d.getVar('WORKDIR', True) or "")
27 info['sourcedir'] = (d.getVar('S', True) or "")
28 info['pn'] = (d.getVar( 'PN', True ) or "")
29 info['pv'] = (d.getVar( 'PV', True ) or "")
30 info['src_uri'] = (d.getVar( 'SRC_URI', True ) or "")
31 info['spdx_version'] = (d.getVar('SPDX_VERSION', True) or '')
32 info['data_license'] = (d.getVar('DATA_LICENSE', True) or '')
33
34 spdx_sstate_dir = (d.getVar('SPDXSSTATEDIR', True) or "")
35 manifest_dir = (d.getVar('SPDX_MANIFEST_DIR', True) or "")
36 info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
37 sstatefile = os.path.join(spdx_sstate_dir,
38 info['pn'] + info['pv'] + ".spdx" )
39 info['spdx_temp_dir'] = (d.getVar('SPDX_TEMP_DIR', True) or "")
40 info['tar_file'] = os.path.join( info['workdir'], info['pn'] + ".tar.gz" )
41
42
43 ## get everything from cache. use it to decide if
44 ## something needs to be rerun
45 cur_ver_code = get_ver_code( info['sourcedir'] )
46 cache_cur = False
47 if not os.path.exists( spdx_sstate_dir ):
48 bb.utils.mkdirhier( spdx_sstate_dir )
49 if not os.path.exists( info['spdx_temp_dir'] ):
50 bb.utils.mkdirhier( info['spdx_temp_dir'] )
51 if os.path.exists( sstatefile ):
52 ## cache for this package exists. read it in
53 cached_spdx = get_cached_spdx( sstatefile )
54
55 if cached_spdx['PackageVerificationCode'] == cur_ver_code:
56 bb.warn(info['pn'] + "'s ver code same as cache's. do nothing")
57 cache_cur = True
58 else:
59 local_file_info = setup_foss_scan( info,
60 True, cached_spdx['Files'] )
61 else:
62 local_file_info = setup_foss_scan( info, False, None )
63
64 if cache_cur:
65 spdx_file_info = cached_spdx['Files']
66 else:
67 ## setup fossology command
68 foss_server = (d.getVar('FOSS_SERVER', True) or "")
69 foss_flags = (d.getVar('FOSS_WGET_FLAGS', True) or "")
70 foss_command = "wget %s --post-file=%s %s"\
71 % (foss_flags,info['tar_file'],foss_server)
72
73 #bb.warn(info['pn'] + json.dumps(local_file_info))
74 foss_file_info = run_fossology( foss_command )
75 spdx_file_info = create_spdx_doc( local_file_info, foss_file_info )
76 ## write to cache
77 write_cached_spdx(sstatefile,cur_ver_code,spdx_file_info)
78
79 ## Get document and package level information
80 spdx_header_info = get_header_info(info, cur_ver_code, spdx_file_info)
81
82 ## CREATE MANIFEST
83 create_manifest(info,spdx_header_info,spdx_file_info)
84
85 ## clean up the temp stuff
86 remove_dir_tree( info['spdx_temp_dir'] )
87 if os.path.exists(info['tar_file']):
88 remove_file( info['tar_file'] )
89}
90addtask spdx after do_patch before do_configure
91
92def create_manifest(info,header,files):
93 with open(info['outfile'], 'w') as f:
94 f.write(header + '\n')
95 for chksum, block in files.iteritems():
96 for key, value in block.iteritems():
97 f.write(key + ": " + value)
98 f.write('\n')
99 f.write('\n')
100
101def get_cached_spdx( sstatefile ):
102 import json
103 cached_spdx_info = {}
104 with open( sstatefile, 'r' ) as f:
105 try:
106 cached_spdx_info = json.load(f)
107 except ValueError as e:
108 cached_spdx_info = None
109 return cached_spdx_info
110
111def write_cached_spdx( sstatefile, ver_code, files ):
112 import json
113 spdx_doc = {}
114 spdx_doc['PackageVerificationCode'] = ver_code
115 spdx_doc['Files'] = {}
116 spdx_doc['Files'] = files
117 with open( sstatefile, 'w' ) as f:
118 f.write(json.dumps(spdx_doc))
119
120def setup_foss_scan( info, cache, cached_files ):
121 import errno, shutil
122 import tarfile
123 file_info = {}
124 cache_dict = {}
125
126 for f_dir, f in list_files( info['sourcedir'] ):
127 full_path = os.path.join( f_dir, f )
128 abs_path = os.path.join(info['sourcedir'], full_path)
129 dest_dir = os.path.join( info['spdx_temp_dir'], f_dir )
130 dest_path = os.path.join( info['spdx_temp_dir'], full_path )
131 try:
132 stats = os.stat(abs_path)
133 except OSError as e:
134            bb.warn( "Stat failed: " + str(e) + "\n")
135 continue
136
137 checksum = hash_file( abs_path )
138 mtime = time.asctime(time.localtime(stats.st_mtime))
139
140 ## retain cache information if it exists
141 file_info[checksum] = {}
142 if cache and checksum in cached_files:
143 file_info[checksum] = cached_files[checksum]
144 else:
145 file_info[checksum]['FileName'] = full_path
146
147 try:
148 os.makedirs( dest_dir )
149 except OSError as e:
150 if e.errno == errno.EEXIST and os.path.isdir(dest_dir):
151 pass
152 else:
153 bb.warn( "mkdir failed " + str(e) + "\n" )
154 continue
155
156        if (cache and checksum not in cached_files) or not cache:
157 try:
158 shutil.copyfile( abs_path, dest_path )
159 except shutil.Error as e:
160 bb.warn( str(e) + "\n" )
161 except IOError as e:
162 bb.warn( str(e) + "\n" )
163
164 with tarfile.open( info['tar_file'], "w:gz" ) as tar:
165 tar.add( info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']) )
166 tar.close()
167
168 return file_info
169
170
171def remove_dir_tree( dir_name ):
172 import shutil
173 try:
174 shutil.rmtree( dir_name )
175 except:
176 pass
177
178def remove_file( file_name ):
179 try:
180 os.remove( file_name )
181 except OSError as e:
182 pass
183
184def list_files( dir ):
185 for root, subFolders, files in os.walk( dir ):
186 for f in files:
187 rel_root = os.path.relpath( root, dir )
188 yield rel_root, f
189 return
190
191def hash_file( file_name ):
192    # Read via a with-block so the file is always closed; the previous
193    # finally clause raised NameError when open() itself failed.
194    try:
195        with open( file_name, 'rb' ) as f:
196            data_string = f.read()
197    except (IOError, OSError):
198        return None
199    sha1 = hash_string( data_string )
200    return sha1
201
202def hash_string( data ):
203 import hashlib
204 sha1 = hashlib.sha1()
205 sha1.update( data )
206 return sha1.hexdigest()
207
208def run_fossology( foss_command ):
209 import string, re
210 import subprocess
211
212 p = subprocess.Popen(foss_command.split(),
213 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
214 foss_output, foss_error = p.communicate()
215
216 records = []
217 records = re.findall('FileName:.*?</text>', foss_output, re.S)
218
219 file_info = {}
220 for rec in records:
221 rec = string.replace( rec, '\r', '' )
222 chksum = re.findall( 'FileChecksum: SHA1: (.*)\n', rec)[0]
223 file_info[chksum] = {}
224 file_info[chksum]['FileCopyrightText'] = re.findall( 'FileCopyrightText: '
225 + '(.*?</text>)', rec, re.S )[0]
226 fields = ['FileType','LicenseConcluded',
227 'LicenseInfoInFile','FileName']
228 for field in fields:
229 file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
230
231 return file_info
232
233def create_spdx_doc( file_info, scanned_files ):
234 import json
235 ## push foss changes back into cache
236 for chksum, lic_info in scanned_files.iteritems():
237 if chksum in file_info:
238            ## FileName is already set from the local scan; nothing to update
239 file_info[chksum]['FileType'] = lic_info['FileType']
240 file_info[chksum]['FileChecksum: SHA1'] = chksum
241 file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
242 file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
243 file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
244 else:
245 bb.warn(lic_info['FileName'] + " : " + chksum
246 + " : is not in the local file info: "
247 + json.dumps(lic_info,indent=1))
248 return file_info
249
250def get_ver_code( dirname ):
251 chksums = []
252 for f_dir, f in list_files( dirname ):
253 try:
254 stats = os.stat(os.path.join(dirname,f_dir,f))
255 except OSError as e:
256            bb.warn( "Stat failed: " + str(e) + "\n")
257 continue
258 chksums.append(hash_file(os.path.join(dirname,f_dir,f)))
259 ver_code_string = ''.join( chksums ).lower()
260 ver_code = hash_string( ver_code_string )
261 return ver_code
262
263def get_header_info( info, spdx_verification_code, spdx_files ):
264 """
265 Put together the header SPDX information.
266 Eventually this needs to become a lot less
267 of a hardcoded thing.
268 """
269 from datetime import datetime
270 import os
271 head = []
272 DEFAULT = "NOASSERTION"
273
274 #spdx_verification_code = get_ver_code( info['sourcedir'] )
275 package_checksum = ''
276 if os.path.exists(info['tar_file']):
277 package_checksum = hash_file( info['tar_file'] )
278 else:
279 package_checksum = DEFAULT
280
281 ## document level information
282 head.append("SPDXVersion: " + info['spdx_version'])
283 head.append("DataLicense: " + info['data_license'])
284 head.append("DocumentComment: <text>SPDX for "
285 + info['pn'] + " version " + info['pv'] + "</text>")
286 head.append("")
287
288 ## Creator information
289 now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
290 head.append("## Creation Information")
291 head.append("Creator: fossology-spdx")
292 head.append("Created: " + now)
293 head.append("CreatorComment: <text>UNO</text>")
294 head.append("")
295
296 ## package level information
297 head.append("## Package Information")
298 head.append("PackageName: " + info['pn'])
299 head.append("PackageVersion: " + info['pv'])
300 head.append("PackageDownloadLocation: " + DEFAULT)
301 head.append("PackageSummary: <text></text>")
302 head.append("PackageFileName: " + os.path.basename(info['tar_file']))
303 head.append("PackageSupplier: Person:" + DEFAULT)
304 head.append("PackageOriginator: Person:" + DEFAULT)
305 head.append("PackageChecksum: SHA1: " + package_checksum)
306 head.append("PackageVerificationCode: " + spdx_verification_code)
307 head.append("PackageDescription: <text>" + info['pn']
308 + " version " + info['pv'] + "</text>")
309 head.append("")
310 head.append("PackageCopyrightText: <text>" + DEFAULT + "</text>")
311 head.append("")
312 head.append("PackageLicenseDeclared: " + DEFAULT)
313 head.append("PackageLicenseConcluded: " + DEFAULT)
314 head.append("PackageLicenseInfoFromFiles: " + DEFAULT)
315 head.append("")
316
317 ## header for file level
318 head.append("## File Information")
319 head.append("")
320
321 return '\n'.join(head)
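# The assembled header looks roughly like this (all values illustrative):
#   SPDXVersion: SPDX-1.1
#   DataLicense: CC0-1.0
#   DocumentComment: <text>SPDX for zlib version 1.2.8</text>
#   ...
#   PackageName: zlib
#   PackageChecksum: SHA1: <sha1 of the source tarball>
#   PackageVerificationCode: <sha1 over the concatenated per-file checksums>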
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
new file mode 100644
index 0000000000..7b16bc04d5
--- /dev/null
+++ b/meta/classes/sstate.bbclass
@@ -0,0 +1,798 @@
1SSTATE_VERSION = "3"
2
3SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
4SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
5
6def generate_sstatefn(spec, hash, d):
7 if not hash:
8 hash = "INVALID"
9 return hash[:2] + "/" + spec + hash
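# e.g. generate_sstatefn("sstate:zlib:...:", "3f2a9c...", d) (hash value
# illustrative) returns "3f/sstate:zlib:...:3f2a9c...", so packages shard into
# up to 256 two-character subdirectories of SSTATE_DIR.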
10
11SSTATE_PKGARCH = "${PACKAGE_ARCH}"
12SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
13SSTATE_SWSPEC = "sstate:${BPN}::${PV}:${PR}::${SSTATE_VERSION}:"
14SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
15SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
16SSTATE_EXTRAPATH = ""
17SSTATE_EXTRAPATHWILDCARD = ""
18SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
19
20# We don't want the sstate to depend on things like the distro string
21# of the system; we let the sstate paths take care of this.
22SSTATE_EXTRAPATH[vardepvalue] = ""
23
24SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/"
25# Also need to make cross recipes append to ${PN} and install once for any given PACKAGE_ARCH so
26# we can avoid multiple installs (e.g. routerstationpro+qemumips both using mips32)
27SSTATE_DUPWHITELIST += "${STAGING_LIBDIR_NATIVE}/${MULTIMACH_TARGET_SYS} ${STAGING_DIR_NATIVE}/usr/libexec/${MULTIMACH_TARGET_SYS} ${STAGING_BINDIR_NATIVE}/${MULTIMACH_TARGET_SYS} ${STAGING_DIR_NATIVE}${includedir_native}/gcc-build-internal-${MULTIMACH_TARGET_SYS}"
28SSTATE_DUPWHITELIST += "${STAGING_DIR_NATIVE}/sysroot-providers/virtual_${TARGET_PREFIX} ${STAGING_DIR_NATIVE}/sysroot-providers/binutils-cross ${STAGING_DIR_NATIVE}/sysroot-providers/gcc-cross"
29# Avoid docbook/sgml catalog warnings for now
30SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
31
32SSTATE_SCAN_FILES ?= "*.la *-config *_config"
33SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
34
35BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
36
37SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
38
39SSTATEPREINSTFUNCS ?= ""
40SSTATEPOSTINSTFUNCS ?= ""
41EXTRA_STAGING_FIXMES ?= ""
42
43# Specify dirs in which the shell function is executed and don't use ${B}
44# as the default, to avoid possible races on ${B} with other tasks.
45sstate_create_package[dirs] = "${SSTATE_BUILDDIR}"
46sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}"
47
48python () {
49 if bb.data.inherits_class('native', d):
50 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH'))
51 elif bb.data.inherits_class('crosssdk', d):
52 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}"))
53 elif bb.data.inherits_class('cross', d):
54 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TUNE_PKGARCH}"))
55 d.setVar('SSTATE_MANMACH', d.expand("${BUILD_ARCH}_${MACHINE}"))
56 elif bb.data.inherits_class('nativesdk', d):
57 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}"))
58 elif bb.data.inherits_class('cross-canadian', d):
59 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
60 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
61 d.setVar('SSTATE_PKGARCH', "allarch")
62 else:
63 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
64
65 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
66 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
67 d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
68
69    # These classes encode staging paths into their script data and so can only be
70    # reused if we manipulate the paths
71 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
72 scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
73 d.setVar('SSTATE_SCAN_CMD', scan_cmd)
74
75 unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
76 d.setVar('SSTATETASKS', " ".join(unique_tasks))
77 for task in unique_tasks:
78 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
79 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
80}
81
82def sstate_init(task, d):
83 ss = {}
84 ss['task'] = task
85 ss['dirs'] = []
86 ss['plaindirs'] = []
87 ss['lockfiles'] = []
88 ss['lockfiles-shared'] = []
89 return ss
90
91def sstate_state_fromvars(d, task = None):
92 if task is None:
93 task = d.getVar('BB_CURRENTTASK', True)
94 if not task:
95 bb.fatal("sstate code running without task context?!")
96 task = task.replace("_setscene", "")
97
98 if task.startswith("do_"):
99 task = task[3:]
100 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
101 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
102 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
103 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
104 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
105 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
106 if not task or len(inputs) != len(outputs):
107 bb.fatal("sstate variables not setup correctly?!")
108
109 if task == "populate_lic":
110 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
111 d.setVar("SSTATE_EXTRAPATH", "")
112
113 ss = sstate_init(task, d)
114 for i in range(len(inputs)):
115 sstate_add(ss, inputs[i], outputs[i], d)
116 ss['lockfiles'] = lockfiles
117 ss['lockfiles-shared'] = lockfilesshared
118 ss['plaindirs'] = plaindirs
119 ss['interceptfuncs'] = interceptfuncs
120 return ss
121
122def sstate_add(ss, source, dest, d):
123 if not source.endswith("/"):
124 source = source + "/"
125 if not dest.endswith("/"):
126 dest = dest + "/"
127 source = os.path.normpath(source)
128 dest = os.path.normpath(dest)
129 srcbase = os.path.basename(source)
130 ss['dirs'].append([srcbase, source, dest])
131 return ss
132
133def sstate_install(ss, d):
134 import oe.path
135 import subprocess
136
137 sharedfiles = []
138 shareddirs = []
139 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
140
141 d2 = d.createCopy()
142 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
143 if extrainf:
144 d2.setVar("SSTATE_MANMACH", extrainf)
145 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
146
147 if os.access(manifest, os.R_OK):
148 bb.fatal("Package already staged (%s)?!" % manifest)
149
150 locks = []
151 for lock in ss['lockfiles-shared']:
152 locks.append(bb.utils.lockfile(lock, True))
153 for lock in ss['lockfiles']:
154 locks.append(bb.utils.lockfile(lock))
155
156 for state in ss['dirs']:
157 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
158 for walkroot, dirs, files in os.walk(state[1]):
159 for file in files:
160 srcpath = os.path.join(walkroot, file)
161 dstpath = srcpath.replace(state[1], state[2])
162 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
163 sharedfiles.append(dstpath)
164 for dir in dirs:
165 srcdir = os.path.join(walkroot, dir)
166 dstdir = srcdir.replace(state[1], state[2])
167 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
168 if not dstdir.endswith("/"):
169 dstdir = dstdir + "/"
170 shareddirs.append(dstdir)
171
172 # Check the file list for conflicts against files which already exist
173 whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
174 match = []
175 for f in sharedfiles:
176 if os.path.exists(f):
177 f = os.path.normpath(f)
178 realmatch = True
179 for w in whitelist:
180 if f.startswith(w):
181 realmatch = False
182 break
183 if realmatch:
184 match.append(f)
185 sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
186 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
187 if search_output != "":
188 match.append("Matched in %s" % search_output.rstrip())
189 if match:
190 bb.warn("The recipe %s is trying to install files into a shared area when those files already exist. Those files and their manifest location are:\n %s\nPlease verify which package should provide the above files." % (d.getVar('PN', True), "\n ".join(match)))
191
192 # Write out the manifest
193 f = open(manifest, "w")
194 for file in sharedfiles:
195 f.write(file + "\n")
196
197 # We want to ensure that directories appear at the end of the manifest
198 # so that when we test to see if they should be deleted any contents
199 # added by the task will have been removed first.
200 dirs = sorted(shareddirs, key=len)
201 # Must remove children first, which will have a longer path than the parent
202 for di in reversed(dirs):
203 f.write(di + "\n")
204 f.close()
205
206 # Run the actual file install
207 for state in ss['dirs']:
208 if os.path.exists(state[1]):
209 oe.path.copyhardlinktree(state[1], state[2])
210
211 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
212 bb.build.exec_func(postinst, d)
213
214 for lock in locks:
215 bb.utils.unlockfile(lock)
216
217sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
218sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
219
220def sstate_installpkg(ss, d):
221 import oe.path
222 import subprocess
223
224 def prepdir(dir):
225 # remove dir if it exists, ensure any parent directories do exist
226 if os.path.exists(dir):
227 oe.path.remove(dir)
228 bb.utils.mkdirhier(dir)
229 oe.path.remove(dir)
230
231 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
232 sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
233 sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
234
235 if not os.path.exists(sstatepkg):
236 pstaging_fetch(sstatefetch, sstatepkg, d)
237
238 if not os.path.isfile(sstatepkg):
239 bb.note("Staging package %s does not exist" % sstatepkg)
240 return False
241
242 sstate_clean(ss, d)
243
244 d.setVar('SSTATE_INSTDIR', sstateinst)
245 d.setVar('SSTATE_PKG', sstatepkg)
246
247 for preinst in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split():
248 bb.build.exec_func(preinst, d)
249
250 bb.build.exec_func('sstate_unpack_package', d)
251
252 # Fixup hardcoded paths
253 #
254 # Note: The logic below must match the reverse logic in
255 # sstate_hardcode_path(d)
256
257 fixmefn = sstateinst + "fixmepath"
258 if os.path.isfile(fixmefn):
259 staging = d.getVar('STAGING_DIR', True)
260 staging_target = d.getVar('STAGING_DIR_TARGET', True)
261 staging_host = d.getVar('STAGING_DIR_HOST', True)
262
263 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
264 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
265 elif bb.data.inherits_class('cross', d):
266 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
267 else:
268 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
269
270 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
271 for fixmevar in extra_staging_fixmes.split():
272 fixme_path = d.getVar(fixmevar, True)
273 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
274
275 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
276 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
277
278 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
279 subprocess.call(sstate_hardcode_cmd, shell=True)
280
281 # Need to remove this or we'd copy it into the target directory and may
282 # conflict with another writer
283 os.remove(fixmefn)
284
285 for state in ss['dirs']:
286 prepdir(state[1])
287 os.rename(sstateinst + state[0], state[1])
288 sstate_install(ss, d)
289
290 for plain in ss['plaindirs']:
291 workdir = d.getVar('WORKDIR', True)
292 src = sstateinst + "/" + plain.replace(workdir, '')
293 dest = plain
294 bb.utils.mkdirhier(src)
295 prepdir(dest)
296 os.rename(src, dest)
297
298 return True
299
300def sstate_clean_cachefile(ss, d):
301 import oe.path
302
303 sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
304 bb.note("Removing %s" % sstatepkgfile)
305 oe.path.remove(sstatepkgfile)
306
307def sstate_clean_cachefiles(d):
308 for task in (d.getVar('SSTATETASKS', True) or "").split():
309 ld = d.createCopy()
310 ss = sstate_state_fromvars(ld, task)
311 sstate_clean_cachefile(ss, ld)
312
313def sstate_clean_manifest(manifest, d):
314 import oe.path
315
316 mfile = open(manifest)
317 entries = mfile.readlines()
318 mfile.close()
319
320 for entry in entries:
321 entry = entry.strip()
322 bb.debug(2, "Removing manifest: %s" % entry)
323 # We can race against another package populating directories as we're removing them
324 # so we ignore errors here.
325 try:
326 if entry.endswith("/"):
327 if os.path.islink(entry[:-1]):
328 os.remove(entry[:-1])
329 elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
330 os.rmdir(entry[:-1])
331 else:
332 oe.path.remove(entry)
333 except OSError:
334 pass
335
336 oe.path.remove(manifest)
337
338def sstate_clean(ss, d):
339 import oe.path
340 import glob
341
342 d2 = d.createCopy()
343 stamp_clean = d.getVar("STAMPCLEAN", True)
344 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
345 if extrainf:
346 d2.setVar("SSTATE_MANMACH", extrainf)
347 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
348 else:
349 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
350
351 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
352
353 if os.path.exists(manifest):
354 locks = []
355 for lock in ss['lockfiles-shared']:
356 locks.append(bb.utils.lockfile(lock))
357 for lock in ss['lockfiles']:
358 locks.append(bb.utils.lockfile(lock))
359
360 sstate_clean_manifest(manifest, d)
361
362 for lock in locks:
363 bb.utils.unlockfile(lock)
364
365 # Remove the current and previous stamps, but keep the sigdata.
366 #
367 # The glob() matches do_task* which may match multiple tasks, for
368 # example: do_package and do_package_write_ipk, so we need to
369 # exactly match *.do_task.* and *.do_task_setscene.*
370 rm_stamp = '.do_%s.' % ss['task']
371 rm_setscene = '.do_%s_setscene.' % ss['task']
372 # For BB_SIGNATURE_HANDLER = "noop"
373 rm_nohash = ".do_%s" % ss['task']
374 for stfile in glob.glob(wildcard_stfile):
375 # Keep the sigdata
376 if ".sigdata." in stfile:
377 continue
378 # Preserve taint files in the stamps directory
379 if stfile.endswith('.taint'):
380 continue
381 if rm_stamp in stfile or rm_setscene in stfile or \
382 stfile.endswith(rm_nohash):
383 oe.path.remove(stfile)
384
385sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
386
387CLEANFUNCS += "sstate_cleanall"
388
389python sstate_cleanall() {
390 bb.note("Removing shared state for package %s" % d.getVar('PN', True))
391
392 manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
393 if not os.path.exists(manifest_dir):
394 return
395
396 tasks = d.getVar('SSTATETASKS', True).split()
397 for name in tasks:
398 ld = d.createCopy()
399 shared_state = sstate_state_fromvars(ld, name)
400 sstate_clean(shared_state, ld)
401}
402
403def sstate_hardcode_path(d):
404 import subprocess, platform
405
406 # Need to remove hardcoded paths and fix these when we install the
407 # staging packages.
408 #
409 # Note: the logic in this function needs to match the reverse logic
410 # in sstate_installpkg(ss, d)
411
412 staging = d.getVar('STAGING_DIR', True)
413 staging_target = d.getVar('STAGING_DIR_TARGET', True)
414 staging_host = d.getVar('STAGING_DIR_HOST', True)
415 sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
416
417 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
418 sstate_grep_cmd = "grep -l -e '%s'" % (staging)
419 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
420 elif bb.data.inherits_class('cross', d):
421 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
422 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
423 else:
424 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
425 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
426
427 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
428 for fixmevar in extra_staging_fixmes.split():
429 fixme_path = d.getVar(fixmevar, True)
430 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
431
432 fixmefn = sstate_builddir + "fixmepath"
433
434 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
435 sstate_filelist_cmd = "tee %s" % (fixmefn)
436
437 # fixmepath file needs relative paths, drop sstate_builddir prefix
438 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
439
440 xargs_no_empty_run_cmd = '--no-run-if-empty'
441 if platform.system() == 'Darwin':
442 xargs_no_empty_run_cmd = ''
443
444 # Limit the fixpaths and sed operations based on the initial grep search
445 # This has the side effect of making sure the vfs cache is hot
446 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
447
448 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
449 subprocess.call(sstate_hardcode_cmd, shell=True)
450
451 # If the fixmefn is empty, remove it.
452 if os.stat(fixmefn).st_size == 0:
453 os.remove(fixmefn)
454 else:
455 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
456 subprocess.call(sstate_filelist_relative_cmd, shell=True)
457
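# A minimal standalone sketch of the substitution above (hypothetical paths,
# no datastore; SSTATE_SCAN_CMD is simplified to a recursive grep):
#
#   import subprocess
#   staging = "/build/tmp/sysroots/x86_64-linux"
#   builddir = "/build/tmp/work/demo/sstate-build-populate_sysroot/"
#   fixmefn = builddir + "fixmepath"
#   # grep -l lists only files containing the path; tee records them;
#   # sed then rewrites the path to the FIXME placeholder in place
#   cmd = ("grep -lr -e '%s' %s | tee %s | xargs --no-run-if-empty "
#          "sed -i -e 's:%s:FIXMESTAGINGDIR:g'") % (staging, builddir, fixmefn, staging)
#   subprocess.call(cmd, shell=True)
#
# fixmepath then lists every rewritten file, so sstate_installpkg can sed
# FIXMESTAGINGDIR back to the real staging path on the installing machine.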
458def sstate_package(ss, d):
459 import oe.path
460
461 def make_relative_symlink(path, outputpath, d):
462 # Rewrite absolute TMPDIR paths in symlinks as relative ones
463 if not os.path.islink(path):
464 return
465 link = os.readlink(path)
466 if not os.path.isabs(link):
467 return
468 if not link.startswith(tmpdir):
469 return
470
471 depth = outputpath.rpartition(tmpdir)[2].count('/')
472 base = link.partition(tmpdir)[2].strip()
473 while depth > 1:
474 base = "/.." + base
475 depth -= 1
476 base = "." + base
477
478 bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
479 os.remove(path)
480 os.symlink(base, path)
481
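# Worked example (hypothetical paths): with tmpdir = "/build/tmp", a link whose
# target is "/build/tmp/sysroots/qemux86/usr/lib/libfoo.so.1" and an outputpath
# of "/build/tmp/sysroots/qemux86/usr/lib/libfoo.so" gives depth = 5, so the
# target becomes "./../../../../sysroots/qemux86/usr/lib/libfoo.so.1": four ".."
# components climb from usr/lib back to TMPDIR, keeping the link valid even if
# TMPDIR is relocated.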
482 tmpdir = d.getVar('TMPDIR', True)
483
484 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
485 sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['task'] + ".tgz"
486 bb.utils.remove(sstatebuild, recurse=True)
487 bb.utils.mkdirhier(sstatebuild)
488 bb.utils.mkdirhier(os.path.dirname(sstatepkg))
489 for state in ss['dirs']:
490 if not os.path.exists(state[1]):
491 continue
492 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
493 for walkroot, dirs, files in os.walk(state[1]):
494 for file in files:
495 srcpath = os.path.join(walkroot, file)
496 dstpath = srcpath.replace(state[1], state[2])
497 make_relative_symlink(srcpath, dstpath, d)
498 for dir in dirs:
499 srcpath = os.path.join(walkroot, dir)
500 dstpath = srcpath.replace(state[1], state[2])
501 make_relative_symlink(srcpath, dstpath, d)
502 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
503 oe.path.copyhardlinktree(state[1], sstatebuild + state[0])
504
505 workdir = d.getVar('WORKDIR', True)
506 for plain in ss['plaindirs']:
507 pdir = plain.replace(workdir, sstatebuild)
508 bb.utils.mkdirhier(plain)
509 bb.utils.mkdirhier(pdir)
510 oe.path.copyhardlinktree(plain, pdir)
511
512 d.setVar('SSTATE_BUILDDIR', sstatebuild)
513 d.setVar('SSTATE_PKG', sstatepkg)
514 sstate_hardcode_path(d)
515 bb.build.exec_func('sstate_create_package', d)
516
517 bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
518
519 return
520
521def pstaging_fetch(sstatefetch, sstatepkg, d):
522 import bb.fetch2
523
524 # Only try and fetch if the user has configured a mirror
525 mirrors = d.getVar('SSTATE_MIRRORS', True)
526 if not mirrors:
527 return
528
529 # Copy the data object and override DL_DIR and SRC_URI
530 localdata = bb.data.createCopy(d)
531 bb.data.update_data(localdata)
532
533 dldir = localdata.expand("${SSTATE_DIR}")
534 bb.utils.mkdirhier(dldir)
535
536 localdata.delVar('MIRRORS')
537 localdata.delVar('FILESPATH')
538 localdata.setVar('DL_DIR', dldir)
539 localdata.setVar('PREMIRRORS', mirrors)
540
541 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
542 # we'll want to allow network access for the current set of fetches.
543 if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
544 localdata.delVar('BB_NO_NETWORK')
545
546 # Try a fetch from the sstate mirror, if it fails just return and
547 # we will build the package
548 for srcuri in ['file://{0}'.format(sstatefetch),
549 'file://{0}.siginfo'.format(sstatefetch)]:
550 localdata.setVar('SRC_URI', srcuri)
551 try:
552 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
553 fetcher.download()
554
555 # Need to optimise this, if using file:// urls, the fetcher just changes the local path
556 # For now work around by symlinking
557 localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
558 if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
559 os.symlink(localpath, sstatepkg)
560
561 except bb.fetch2.BBFetchException:
562 break
563
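# Example mirror configuration in local.conf that this function acts on
# (hypothetical server):
#   SSTATE_MIRRORS ?= "file://.* http://sstate.example.com/PATH"
# The "file://.*" pattern matches any sstate object, and PATH is replaced
# with the object's path relative to SSTATE_DIR.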
564def sstate_setscene(d):
565 shared_state = sstate_state_fromvars(d)
566 accelerate = sstate_installpkg(shared_state, d)
567 if not accelerate:
568 raise bb.build.FuncFailed("No suitable staging package found")
569
570python sstate_task_prefunc () {
571 shared_state = sstate_state_fromvars(d)
572 sstate_clean(shared_state, d)
573}
574
575python sstate_task_postfunc () {
576 shared_state = sstate_state_fromvars(d)
577 sstate_install(shared_state, d)
578 for intercept in shared_state['interceptfuncs']:
579 bb.build.exec_func(intercept, d)
580 omask = os.umask(002)
581 if omask != 002:
582 bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
583 sstate_package(shared_state, d)
584 os.umask(omask)
585}
586
587
588#
589# Shell function to generate a sstate package from a directory
590# set as SSTATE_BUILDDIR
591#
592sstate_create_package () {
593 cd ${SSTATE_BUILDDIR}
594 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
595 # Need to handle empty directories
596 if [ "$(ls -A)" ]; then
597 set +e
598 tar -czf $TFILE *
599 if [ $? -ne 0 ] && [ $? -ne 1 ]; then
600 exit 1
601 fi
602 set -e
603 else
604 tar -cz --file=$TFILE --files-from=/dev/null
605 fi
606 chmod 0664 $TFILE
607 mv -f $TFILE ${SSTATE_PKG}
608
609 cd ${WORKDIR}
610 rm -rf ${SSTATE_BUILDDIR}
611}
612
613#
614# Shell function to decompress and prepare a package for installation
615#
616sstate_unpack_package () {
617 mkdir -p ${SSTATE_INSTDIR}
618 cd ${SSTATE_INSTDIR}
619 tar -xmvzf ${SSTATE_PKG}
620}
621
622BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
623
624def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d):
625
626 ret = []
627 missed = []
628
629 def getpathcomponents(task, d):
630 # Magic data from BB_HASHFILENAME
631 splithashfn = sq_hashfn[task].split(" ")
632 spec = splithashfn[1]
633 extrapath = splithashfn[0]
634
635 tname = sq_task[task][3:]
636
637 if tname in ["fetch", "unpack", "patch", "populate_lic"] and splithashfn[2]:
638 spec = splithashfn[2]
639 extrapath = ""
640
641 return spec, extrapath, tname
642
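# Illustration (hypothetical values): BB_HASHFILENAME carries three
# space-separated fields, e.g.
#   "universal/ sstate:zlib:core2-64-poky-linux:1.2.8:r0:core2-64:3: sstate:zlib::1.2.8:r0::3:"
# i.e. extrapath, the full package spec, and a machine-independent spec.
# For fetch/unpack/patch/populate_lic the third field is preferred, since
# the output of those tasks does not vary per machine.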
643
644 for task in range(len(sq_fn)):
645
646 spec, extrapath, tname = getpathcomponents(task, d)
647
648 sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz.siginfo")
649
650 if os.path.exists(sstatefile):
651 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
652 ret.append(task)
653 continue
654 else:
655 missed.append(task)
656 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
657
658 mirrors = d.getVar("SSTATE_MIRRORS", True)
659 if mirrors:
660 # Copy the data object and override DL_DIR and SRC_URI
661 localdata = bb.data.createCopy(d)
662 bb.data.update_data(localdata)
663
664 dldir = localdata.expand("${SSTATE_DIR}")
665 localdata.setVar('DL_DIR', dldir)
666 localdata.setVar('PREMIRRORS', mirrors)
667
668 bb.debug(2, "SState using premirror of: %s" % mirrors)
669
670 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
671 # we'll want to allow network access for the current set of fetches.
672 if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
673 localdata.delVar('BB_NO_NETWORK')
674
675 for task in range(len(sq_fn)):
676 if task in ret:
677 continue
678
679 spec, extrapath, tname = getpathcomponents(task, d)
680
681 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz.siginfo")
682
683 srcuri = "file://" + sstatefile
684 localdata.setVar('SRC_URI', srcuri)
685 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
686
687 try:
688 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata)
689 fetcher.checkstatus()
690 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
691 ret.append(task)
692 if task in missed:
693 missed.remove(task)
694 except:
695 # the task is already in missed from the local existence check above
696 bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
697 pass
698
699 inheritlist = d.getVar("INHERIT", True)
700 if "toaster" in inheritlist:
701 evdata = {'missed': [], 'found': []}
702 for task in missed:
703 spec, extrapath, tname = getpathcomponents(task, d)
704 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
705 evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
706 for task in ret:
707 spec, extrapath, tname = getpathcomponents(task, d)
708 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
709 evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
710 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
711
712 return ret
713
714BB_SETSCENE_DEPVALID = "setscene_depvalid"
715
716def setscene_depvalid(task, taskdependees, notneeded, d):
717 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
718 # task is included in taskdependees too
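 # Illustration (hypothetical entries): taskdependees maps task ids to
 # [PN, TASKNAME, FILENAME] triples, e.g.
 #   { 3: ['zlib', 'do_populate_sysroot', '/meta/recipes-core/zlib/zlib_1.2.8.bb'],
 #     8: ['quilt-native', 'do_populate_sysroot', '/meta/recipes-devtools/quilt/quilt-native_0.63.bb'] }
 # The function returns True when none of the dependees genuinely needs this
 # task's output installed, so the setscene task can be skipped.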
719
720 bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))
721
722 def isNativeCross(x):
723 return x.endswith("-native") or x.endswith("-cross") or x.endswith("-cross-initial") or x.endswith("-crosssdk") or x.endswith("-crosssdk-initial")
724
725 def isPostInstDep(x):
726 if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-update-icon-cache-native"]:
727 return True
728 return False
729
730 # We only need to trigger populate_lic through direct dependencies
731 if taskdependees[task][1] == "do_populate_lic":
732 return True
733
734 for dep in taskdependees:
735 bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
736 if task == dep:
737 continue
738 if dep in notneeded:
739 continue
740 # do_package_write_* and do_package don't need do_package
741 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
742 continue
743 # do_package_write_* and do_package don't need do_populate_sysroot, unless it is a postinstall dependency
744 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata']:
745 if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
746 return False
747 continue
748 # Native/Cross packages don't exist and are noexec anyway
749 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package']:
750 continue
751
752 # Consider sysroot depending on sysroot tasks
753 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
754 # base-passwd/shadow-sysroot don't need their dependencies
755 if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
756 continue
757 # Nothing needs to depend on libc-initial/gcc-cross-initial
758 if taskdependees[task][0].endswith("-initial"):
759 continue
760 # Native/Cross populate_sysroot need their dependencies
761 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
762 return False
763 # Target populate_sysroot depended on by cross tools need to be installed
764 if isNativeCross(taskdependees[dep][0]):
765 return False
766 # Native/cross tools depended upon by target sysroot are not needed
767 if isNativeCross(taskdependees[task][0]):
768 continue
769 # Target populate_sysroot need their dependencies
770 return False
771
772 # This is due to the [depends] in useradd.bbclass complicating matters
773 # The logic *is* reversed here due to the way hard setscene dependencies are injected
774 if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
775 continue
776
777 # Safe fallthrough default
778 bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
779 return False
780 return True
781
782addhandler sstate_eventhandler
783sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
784python sstate_eventhandler() {
785 d = e.data
786 # When we write an sstate package we rewrite the SSTATE_PKG
787 spkg = d.getVar('SSTATE_PKG', True)
788 if not spkg.endswith(".tgz"):
789 taskname = d.getVar("BB_RUNTASK", True)[3:]
790 spec = d.getVar('SSTATE_PKGSPEC', True)
791 swspec = d.getVar('SSTATE_SWSPEC', True)
792 if taskname in ["fetch", "unpack", "patch", "populate_lic"] and swspec:
793 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
794 d.setVar("SSTATE_EXTRAPATH", "")
795 sstatepkg = d.getVar('SSTATE_PKG', True)
796 bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz.siginfo", d)
797}
798
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
new file mode 100644
index 0000000000..7c43e7618d
--- /dev/null
+++ b/meta/classes/staging.bbclass
@@ -0,0 +1,121 @@
1
2sysroot_stage_dir() {
3 src="$1"
4 dest="$2"
5 # if the src doesn't exist don't do anything
6 if [ ! -d "$src" ]; then
7 return
8 fi
9
10 mkdir -p "$dest"
11 (
12 cd $src
13 find . -print0 | cpio --null -pdlu $dest
14 )
15}
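# For example (hypothetical invocation), staging a recipe's headers:
#   sysroot_stage_dir ${D}${includedir} ${SYSROOT_DESTDIR}${includedir}
# cpio's -l flag hardlinks files into the destination where possible
# instead of copying them, which keeps staging cheap.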
16
17sysroot_stage_libdir() {
18 src="$1"
19 dest="$2"
20
21 sysroot_stage_dir $src $dest
22}
23
24sysroot_stage_dirs() {
25 from="$1"
26 to="$2"
27
28 sysroot_stage_dir $from${includedir} $to${includedir}
29 if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
30 sysroot_stage_dir $from${bindir} $to${bindir}
31 sysroot_stage_dir $from${sbindir} $to${sbindir}
32 sysroot_stage_dir $from${base_bindir} $to${base_bindir}
33 sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
34 sysroot_stage_dir $from${libexecdir} $to${libexecdir}
35 sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
36 sysroot_stage_dir $from${localstatedir} $to${localstatedir}
37 fi
38 if [ -d $from${libdir} ]
39 then
40 sysroot_stage_libdir $from${libdir} $to${libdir}
41 fi
42 if [ -d $from${base_libdir} ]
43 then
44 sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
45 fi
46 if [ -d $from${nonarch_base_libdir} ]
47 then
48 sysroot_stage_libdir $from${nonarch_base_libdir} $to${nonarch_base_libdir}
49 fi
50 sysroot_stage_dir $from${datadir} $to${datadir}
51 # We don't care about docs/info/manpages/locales
52 rm -rf $to${mandir}/ $to${docdir}/ $to${infodir}/ ${to}${datadir}/locale/
53 rm -rf $to${datadir}/applications/ $to${datadir}/fonts/ $to${datadir}/pixmaps/
54}
55
56sysroot_stage_all() {
57 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
58}
59
60do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
61do_populate_sysroot[umask] = "022"
62
63addtask populate_sysroot after do_install
64
65SYSROOT_PREPROCESS_FUNCS ?= ""
66SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/"
67SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
68
69# We clean out any existing sstate from the sysroot if we rerun configure
70python sysroot_cleansstate () {
71 ss = sstate_state_fromvars(d, "populate_sysroot")
72 sstate_clean(ss, d)
73}
74do_configure[prefuncs] += "sysroot_cleansstate"
75
76
77BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
78
79def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
80 problems = set()
81 configurefnids = set()
82 if not invalidtasks:
83 invalidtasks = xrange(len(tasknames))
84 for task in invalidtasks:
85 if tasknames[task] == "do_configure" and task not in covered:
86 configurefnids.add(fnids[task])
87 for task in covered:
88 if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
89 problems.add(task)
90 return problems
91
92python do_populate_sysroot () {
93 bb.build.exec_func("sysroot_stage_all", d)
94 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
95 bb.build.exec_func(f, d)
96 pn = d.getVar("PN", True)
97 multiprov = d.getVar("MULTI_PROVIDER_WHITELIST", True).split()
98 provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
99 bb.utils.mkdirhier(provdir)
100 for p in d.getVar("PROVIDES", True).split():
101 if p in multiprov:
102 continue
103 p = p.replace("/", "_")
104 with open(provdir + p, "w") as f:
105 f.write(pn)
106}
107
108do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
109
110SSTATETASKS += "do_populate_sysroot"
111do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
112do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
113do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
114do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
115
116python do_populate_sysroot_setscene () {
117 sstate_setscene(d)
118}
119addtask do_populate_sysroot_setscene
120
121
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
new file mode 100644
index 0000000000..8964d3ff5e
--- /dev/null
+++ b/meta/classes/syslinux.bbclass
@@ -0,0 +1,187 @@
1# syslinux.bbclass
2# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
3# Released under the MIT license (see packages/COPYING)
4
5# Provide syslinux specific functions for building bootable images.
6
7# External variables
8# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
9# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
10# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
11# ${LABELS} - a list of targets for the automatic config
12# ${APPEND} - an override list of append strings for each label
13# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
14# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu
15# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
16# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
17# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
18# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
19
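# Example image recipe settings (hypothetical values):
#   LABELS = "boot install"
#   APPEND = "root=/dev/sda2 quiet"
#   AUTO_SYSLINUXMENU = "1"
#   SYSLINUX_TIMEOUT = "100"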
20do_bootimg[depends] += "syslinux:do_populate_sysroot \
21 syslinux-native:do_populate_sysroot"
22
23SYSLINUXCFG = "${S}/syslinux.cfg"
24
25ISOLINUXDIR = "/isolinux"
26SYSLINUXDIR = "/"
27# The kernel has an internal default console, which you can override with
28# a console=...some_tty...
29SYSLINUX_DEFAULT_CONSOLE ?= ""
30SYSLINUX_SERIAL ?= "0 115200"
31SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
32ISO_BOOTIMG = "isolinux/isolinux.bin"
33ISO_BOOTCAT = "isolinux/boot.cat"
34MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
35APPEND_prepend = " ${SYSLINUX_ROOT} "
36
37syslinux_populate() {
38 DEST=$1
39 BOOTDIR=$2
40 CFGNAME=$3
41
42 install -d ${DEST}${BOOTDIR}
43
44 # Install the config files
45 install -m 0644 ${SYSLINUXCFG} ${DEST}${BOOTDIR}/${CFGNAME}
46 if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
47 install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
48 install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
49 install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
50 if [ "${SYSLINUX_SPLASH}" != "" ] ; then
51 install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
52 fi
53 fi
54}
55
56syslinux_iso_populate() {
57 iso_dir=$1
58 syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
59 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
60 install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
61}
62
63syslinux_hddimg_populate() {
64 hdd_dir=$1
65 syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
66 install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
67}
68
69syslinux_hddimg_install() {
70 syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
71}
72
73syslinux_hdddirect_install() {
74 DEST=$1
75 syslinux $DEST
76}
77
78python build_syslinux_cfg () {
79 import copy
80 import sys
81
82 workdir = d.getVar('WORKDIR', True)
83 if not workdir:
84 bb.error("WORKDIR not defined, unable to package")
85 return
86
87 labels = d.getVar('LABELS', True)
88 if not labels:
89 bb.debug(1, "LABELS not defined, nothing to do")
90 return
91
92 if labels == []:
93 bb.debug(1, "No labels, nothing to do")
94 return
95
96 cfile = d.getVar('SYSLINUXCFG', True)
97 if not cfile:
98 raise bb.build.FuncFailed('Unable to read SYSLINUXCFG')
99
100 try:
101 cfgfile = file(cfile, 'w')
102 except IOError:
103 raise bb.build.FuncFailed('Unable to open %s' % (cfile))
104
105 cfgfile.write('# Automatically created by OE\n')
106
107 opts = d.getVar('SYSLINUX_OPTS', True)
108
109 if opts:
110 for opt in opts.split(';'):
111 cfgfile.write('%s\n' % opt)
112
113 cfgfile.write('ALLOWOPTIONS 1\n')
114 syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
115 syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
116 syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
117 if syslinux_serial:
118 cfgfile.write('SERIAL %s\n' % syslinux_serial)
119
120 menu = d.getVar('AUTO_SYSLINUXMENU', True)
121
122 if menu and syslinux_serial:
123 cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
124 else:
125 cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
126
127 timeout = d.getVar('SYSLINUX_TIMEOUT', True)
128
129 if timeout:
130 cfgfile.write('TIMEOUT %s\n' % timeout)
131 else:
132 cfgfile.write('TIMEOUT 50\n')
133
134 prompt = d.getVar('SYSLINUX_PROMPT', True)
135 if prompt:
136 cfgfile.write('PROMPT %s\n' % prompt)
137 else:
138 cfgfile.write('PROMPT 1\n')
139
140 if menu:
141 cfgfile.write('ui vesamenu.c32\n')
142 cfgfile.write('menu title Select kernel options and boot kernel\n')
143 cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
144 splash = d.getVar('SYSLINUX_SPLASH', True)
145 if splash:
146 cfgfile.write('menu background splash.lss\n')
147
148 for label in labels.split():
149 localdata = bb.data.createCopy(d)
150
151 overrides = localdata.getVar('OVERRIDES', True)
152 if not overrides:
153 raise bb.build.FuncFailed('OVERRIDES not defined')
154
155 localdata.setVar('OVERRIDES', label + ':' + overrides)
156 bb.data.update_data(localdata)
157
158 btypes = [ [ "", syslinux_default_console ] ]
159 if menu and syslinux_serial:
160 btypes = [ [ "Graphics console ", syslinux_default_console ],
161 [ "Serial console ", syslinux_serial_tty ] ]
162
163 for btype in btypes:
164 cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
165
166 exargs = d.getVar('SYSLINUX_KERNEL_ARGS', True)
167 if exargs:
168 btype[1] += " " + exargs
169
170 append = localdata.getVar('APPEND', True)
171 initrd = localdata.getVar('INITRD', True)
172
173 if append:
174 cfgfile.write('APPEND ')
175
176 if initrd:
177 cfgfile.write('initrd=/initrd ')
178
179 cfgfile.write('LABEL=%s '% (label))
180
181 cfgfile.write('%s %s\n' % (append, btype[1]))
182 else:
183 cfgfile.write('APPEND %s\n' % btype[1])
184
185 cfgfile.close()
186}
187build_syslinux_cfg[vardeps] += "APPEND"
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
new file mode 100644
index 0000000000..2bd63a405b
--- /dev/null
+++ b/meta/classes/systemd.bbclass
@@ -0,0 +1,198 @@
1# The list of packages that should have systemd packaging scripts added. For
2# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
3# files in this package. If this variable isn't set, [package].service is used.
4SYSTEMD_PACKAGES ?= "${PN}"
5SYSTEMD_PACKAGES_class-native ?= ""
6SYSTEMD_PACKAGES_class-nativesdk ?= ""
7
8# Whether to enable or disable the services on installation.
9SYSTEMD_AUTO_ENABLE ??= "enable"
10
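# Example usage in a recipe (hypothetical service names):
#   SYSTEMD_PACKAGES = "${PN}"
#   SYSTEMD_SERVICE_${PN} = "foo.service foo.socket"
#   SYSTEMD_AUTO_ENABLE = "disable"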
11# This class will be included in any recipe that supports systemd init scripts,
12# even if systemd is not in DISTRO_FEATURES. As such don't make any changes
13# directly but check the DISTRO_FEATURES first.
14python __anonymous() {
15 # If the distro features have systemd but not sysvinit, inhibit update-rcd
16 # from doing any work so that pure-systemd images don't have redundant init
17 # files.
18 if oe.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
19 d.appendVar("DEPENDS", " systemd-systemctl-native")
20 if not oe.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
21 d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
22}
23
24systemd_postinst() {
25OPTS=""
26
27if [ -n "$D" ]; then
28 OPTS="--root=$D"
29fi
30
31if type systemctl >/dev/null 2>/dev/null; then
32 systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
33
34 if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
35 systemctl restart ${SYSTEMD_SERVICE}
36 fi
37fi
38}
39
40systemd_prerm() {
41OPTS=""
42
43if [ -n "$D" ]; then
44 OPTS="--root=$D"
45fi
46
47if type systemctl >/dev/null 2>/dev/null; then
48 if [ -z "$D" ]; then
49 systemctl stop ${SYSTEMD_SERVICE}
50 fi
51
52 systemctl $OPTS disable ${SYSTEMD_SERVICE}
53fi
54}
55
56
57systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
58
59python systemd_populate_packages() {
60 if not oe.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
61 return
62
63 def get_package_var(d, var, pkg):
64 val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip()
65 if val == "":
66 val = (d.getVar(var, True) or "").strip()
67 return val
68
69 # Check if systemd-packages already included in PACKAGES
70 def systemd_check_package(pkg_systemd):
71 packages = d.getVar('PACKAGES', True)
72 if not pkg_systemd in packages.split():
73 bb.error('%s does not appear in package list, please add it' % pkg_systemd)
74
75
76 def systemd_generate_package_scripts(pkg):
77 bb.debug(1, 'adding systemd calls to postinst/prerm for %s' % pkg)
78
79 # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
80 # variable.
81 localdata = d.createCopy()
82 localdata.prependVar("OVERRIDES", pkg + ":")
83 bb.data.update_data(localdata)
84
85 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
86 if not postinst:
87 postinst = '#!/bin/sh\n'
88 postinst += localdata.getVar('systemd_postinst', True)
89 d.setVar('pkg_postinst_%s' % pkg, postinst)
90
91 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
92 if not prerm:
93 prerm = '#!/bin/sh\n'
94 prerm += localdata.getVar('systemd_prerm', True)
95 d.setVar('pkg_prerm_%s' % pkg, prerm)
96
97
98 # Add files to FILES_*-systemd if they exist and are not already listed
99 def systemd_append_file(pkg_systemd, file_append):
100 appended = False
101 if os.path.exists(oe.path.join(d.getVar("D", True), file_append)):
102 var_name = "FILES_" + pkg_systemd
103 files = d.getVar(var_name, False) or ""
104 if file_append not in files.split():
105 d.appendVar(var_name, " " + file_append)
106 appended = True
107 return appended
108
109 # Add systemd files to FILES_*-systemd, parse for Also= and follow the references recursively
110 def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
111 # avoid infinite recursion
112 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
113 fullpath = oe.path.join(d.getVar("D", True), path, service)
114 if service.find('.service') != -1:
115 # for *.service add *@.service
116 service_base = service.replace('.service', '')
117 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
118 if service.find('.socket') != -1:
119 # for *.socket add *.service and *@.service
120 service_base = service.replace('.socket', '')
121 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
122 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
123 for key in keys.split():
124 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
125 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key)
126 pipe = os.popen(cmd, 'r')
127 line = pipe.readline()
128 while line:
129 line = line.replace('\n', '')
130 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
131 line = pipe.readline()
132 pipe.close()
133
134 # Check service-files and call systemd_add_files_and_parse for each entry
135 def systemd_check_services():
136 searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),]
137 searchpaths.append(oe.path.join(d.getVar("nonarch_base_libdir", True), "systemd", "system"))
138 searchpaths.append(oe.path.join(d.getVar("exec_prefix", True), d.getVar("nonarch_base_libdir", True), "systemd", "system"))
139 systemd_packages = d.getVar('SYSTEMD_PACKAGES', True)
140 has_exactly_one_service = len(systemd_packages.split()) == 1
141 if has_exactly_one_service:
142 has_exactly_one_service = len(get_package_var(d, 'SYSTEMD_SERVICE', systemd_packages).split()) == 1
143
144 keys = 'Also' # Conflicts??
145 if has_exactly_one_service:
146 # a single service also gets the /dev/null dummies
147 keys = 'Also Conflicts'
148 # scan for all in SYSTEMD_SERVICE[]
149 for pkg_systemd in systemd_packages.split():
150 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
151 path_found = ''
152 for path in searchpaths:
153 if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
154 path_found = path
155 break
156 if path_found != '':
157 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
158 else:
159 raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \
160 (pkg_systemd, service))
161
162 # Run all modifications once when creating package
163 if os.path.exists(d.getVar("D", True)):
164 for pkg in d.getVar('SYSTEMD_PACKAGES', True).split():
165 systemd_check_package(pkg)
166 if d.getVar('SYSTEMD_SERVICE_' + pkg, True):
167 systemd_generate_package_scripts(pkg)
168 systemd_check_services()
169}
170
171PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
172
173python rm_systemd_unitdir (){
174 import shutil
175 if not oe.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
176 systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True))
177 if os.path.exists(systemd_unitdir):
178 shutil.rmtree(systemd_unitdir)
179 systemd_libdir = os.path.dirname(systemd_unitdir)
180 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
181 os.rmdir(systemd_libdir)
182}
183do_install[postfuncs] += "rm_systemd_unitdir "
184
185python rm_sysvinit_initddir (){
186 import shutil
187 sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d"))
188
189 if oe.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
190 not oe.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
191 os.path.exists(sysv_initddir):
192 systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True), "system")
193
194 # If systemd_unitdir contains anything, delete sysv_initddir
195 if (os.path.exists(systemd_unitdir) and os.listdir(systemd_unitdir)):
196 shutil.rmtree(sysv_initddir)
197}
198do_install[postfuncs] += "rm_sysvinit_initddir "
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
new file mode 100644
index 0000000000..e577c6d594
--- /dev/null
+++ b/meta/classes/terminal.bbclass
@@ -0,0 +1,94 @@
1OE_TERMINAL ?= 'auto'
2OE_TERMINAL[type] = 'choice'
3OE_TERMINAL[choices] = 'auto none \
4 ${@" ".join(o.name \
5 for o in oe.terminal.prioritized())}'
6
7OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
8OE_TERMINAL_EXPORTS[type] = 'list'
9
10XAUTHORITY ?= "${HOME}/.Xauthority"
11SHELL ?= "bash"
12
13
14def emit_terminal_func(command, envdata, d):
15 cmd_func = 'do_terminal'
16
17 envdata.setVar(cmd_func, 'exec ' + command)
18 envdata.setVarFlag(cmd_func, 'func', 1)
19
20 runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
21 runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
22 runfile = os.path.join(d.getVar('T', True), runfile)
23 bb.utils.mkdirhier(os.path.dirname(runfile))
24
25 with open(runfile, 'w') as script:
26 script.write('#!/bin/sh -e\n')
27 bb.data.emit_func(cmd_func, script, envdata)
28 script.write(cmd_func)
29 script.write("\n")
30 os.chmod(runfile, 0755)
31
32 return runfile
33
34def oe_terminal(command, title, d):
35 import oe.data
36 import oe.terminal
37
38 envdata = bb.data.init()
39
40 for v in os.environ:
41 envdata.setVar(v, os.environ[v])
42 envdata.setVarFlag(v, 'export', 1)
43
44 for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
45 value = d.getVar(export, True)
46 if value is not None:
47 os.environ[export] = str(value)
48 envdata.setVar(export, str(value))
49 envdata.setVarFlag(export, 'export', 1)
50 if export == "PSEUDO_DISABLED":
51 if "PSEUDO_UNLOAD" in os.environ:
52 del os.environ["PSEUDO_UNLOAD"]
53 envdata.delVar("PSEUDO_UNLOAD")
54
55 # Add in all variables from the user's original environment which
56 # haven't subsequently been set/changed
57 origbbenv = d.getVar("BB_ORIGENV", False) or {}
58 for key in origbbenv:
59 if key in envdata:
60 continue
61 value = origbbenv.getVar(key, True)
62 if value is not None:
63 os.environ[key] = str(value)
64 envdata.setVar(key, str(value))
65 envdata.setVarFlag(key, 'export', 1)
66
67 # A complex PS1 might need more escaping of characters,
68 # so simply don't export PS1 at all.
69 envdata.delVar("PS1")
70
71 # Replace command with an executable wrapper script
72 command = emit_terminal_func(command, envdata, d)
73
74 terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
75 if terminal == 'none':
76 bb.fatal('Devshell usage disabled with OE_TERMINAL')
77 elif terminal != 'auto':
78 try:
79 oe.terminal.spawn(terminal, command, title, None, d)
80 return
81 except oe.terminal.UnsupportedTerminal:
82 bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
83 terminal)
84 except oe.terminal.ExecutionError as exc:
85 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
86
87 try:
88 oe.terminal.spawn_preferred(command, title, None, d)
89 except oe.terminal.NoSupportedTerminals:
90 bb.fatal('No valid terminal found, unable to open devshell')
91 except oe.terminal.ExecutionError as exc:
92 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
93
94oe_terminal[vardepsexclude] = "BB_ORIGENV"
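# Example call from another class (hypothetical command and title):
#   oe_terminal("bash -i", "shell for %s" % d.getVar("PN", True), d)
# This writes a run.do_terminal.<pid> wrapper under ${T} that exports the
# captured environment, then spawns it in the configured terminal.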
diff --git a/meta/classes/testimage-auto.bbclass b/meta/classes/testimage-auto.bbclass
new file mode 100644
index 0000000000..860599d2b5
--- /dev/null
+++ b/meta/classes/testimage-auto.bbclass
@@ -0,0 +1,23 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# Run tests automatically on an image after the image is constructed
7# (as opposed to testimage.bbclass alone where tests must be called
8# manually using bitbake -c testimage <image>).
9#
10# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to
11# inherit it since that will be done in image.bbclass when this variable
12# has been set.
13#
14# See testimage.bbclass for the test implementation.
15
16inherit testimage
17
18python do_testimage_auto() {
19 testimage_main(d)
20}
21addtask testimage_auto before do_build after do_rootfs
22do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}"
23do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}"
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
new file mode 100644
index 0000000000..691c7f6785
--- /dev/null
+++ b/meta/classes/testimage.bbclass
@@ -0,0 +1,232 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# testimage.bbclass enables testing of qemu images using python unittests.
7# Most of the tests are commands run on target image over ssh.
8# To use it add testimage to global inherit and call your target image with -c testimage
9# You can try it out like this:
10# - first build a qemu core-image-sato
11# - add INHERIT += "testimage" in local.conf
12# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
13
14# You can set (or append to) TEST_SUITES in local.conf to select the tests
15# which you want to run for your target.
16# The test names are the module names in meta/lib/oeqa/runtime.
17# Each name in TEST_SUITES represents a required test for the image. (no skipping allowed)
18# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
19# Note that order in TEST_SUITES is important (it's the order tests run) and it influences test dependencies.
20# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
21
22# TEST_LOG_DIR contains an ssh log (what command is running, output and return codes) and a qemu boot log up to login
23# Booting is handled by this class, and it's not a test in itself.
24# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
25
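# Example local.conf fragment (hypothetical selection of suites):
#   INHERIT += "testimage"
#   TEST_SUITES = "ping ssh rpm auto"
#   TEST_QEMUBOOT_TIMEOUT = "1500"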
26TEST_LOG_DIR ?= "${WORKDIR}/testimage"
27
28TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
29TEST_EXPORT_ONLY ?= "0"
30
31DEFAULT_TEST_SUITES = "ping auto"
32DEFAULT_TEST_SUITES_pn-core-image-minimal = "ping"
33DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date rpm smart dmesg python"
34DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc rpm smart kernelmodule dmesg python"
35TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
36
37TEST_QEMUBOOT_TIMEOUT ?= "1000"
38TEST_TARGET ?= "qemu"
39TEST_TARGET_IP ?= ""
40TEST_SERVER_IP ?= ""
41
42TESTIMAGEDEPENDS = ""
43TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot"
44
45TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
46TESTIMAGELOCK_qemuall = ""
47
48python do_testimage() {
49 testimage_main(d)
50}
51addtask testimage
52do_testimage[nostamp] = "1"
53do_testimage[depends] += "${TESTIMAGEDEPENDS}"
54do_testimage[lockfiles] += "${TESTIMAGELOCK}"
55
56
57def get_tests_list(d):
58 testsuites = d.getVar("TEST_SUITES", True).split()
59 bbpath = d.getVar("BBPATH", True).split(':')
60
61 # This relies on lib/ under each directory in BBPATH being added to sys.path
62 # (as done by default in base.bbclass)
63 testslist = []
64 for testname in testsuites:
65 if testname != "auto":
66 found = False
67 for p in bbpath:
68 if os.path.exists(os.path.join(p, 'lib', 'oeqa', 'runtime', testname + '.py')):
69 testslist.append("oeqa.runtime." + testname)
70 found = True
71 break
72 if not found:
73 bb.error('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
74
75 if "auto" in testsuites:
76 def add_auto_list(path):
77 if not os.path.exists(os.path.join(path, '__init__.py')):
78 bb.fatal('Tests directory %s exists but is missing __init__.py' % path)
79 files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
80 for f in files:
81 module = 'oeqa.runtime.' + f[:-3]
82 if module not in testslist:
83 testslist.append(module)
84
85 for p in bbpath:
86 testpath = os.path.join(p, 'lib', 'oeqa', 'runtime')
87 bb.debug(2, 'Searching for tests in %s' % testpath)
88 if os.path.exists(testpath):
89 add_auto_list(testpath)
90
91 return testslist
92
93
94def exportTests(d,tc):
95 import json
96 import shutil
97 import pkgutil
98
99 exportpath = d.getVar("TEST_EXPORT_DIR", True)
100
101 savedata = {}
102 savedata["d"] = {}
103 savedata["target"] = {}
104 for key in tc.__dict__:
105 # special cases
106 if key != "d" and key != "target":
107 savedata[key] = getattr(tc, key)
108 savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
109 savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
110
111 keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
112 and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func")]
113 for key in keys:
114 try:
115 savedata["d"][key] = d.getVar(key, True)
116 except bb.data_smart.ExpansionError:
117 # we don't care about those anyway
118 pass
119
120 with open(os.path.join(exportpath, "testdata.json"), "w") as f:
121 json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
122
123 # now start copying files
124 # we'll basically copy everything under meta/lib/oeqa, with these exceptions
125 # - oeqa/targetcontrol.py - not needed
126 # - oeqa/selftest - something else
127 # That means:
128 # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
129 # - the contents of oeqa/utils and oeqa/runtime/files
130 # - oeqa/oetest.py and oeqa/runexport.py (this will get copied to exportpath not exportpath/oeqa)
131 # - __init__.py files
132 bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
133 bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
134 # copy test modules, this should cover tests in other layers too
135 for t in tc.testslist:
136 mod = pkgutil.get_loader(t)
137 shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
138 # copy __init__.py files
139 oeqadir = pkgutil.get_loader("oeqa").filename
140 shutil.copy2(os.path.join(oeqadir, "__init__.py"), os.path.join(exportpath, "oeqa"))
141 shutil.copy2(os.path.join(oeqadir, "runtime/__init__.py"), os.path.join(exportpath, "oeqa/runtime"))
142 # copy oeqa/oetest.py and oeqa/runexported.py
143 shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
144 shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
145 # copy oeqa/utils/*.py
146 for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
147 for f in files:
148 if f.endswith(".py"):
149 shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
150 # copy oeqa/runtime/files/*
151 for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
152 for f in files:
153 shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
154
155 bb.plain("Exported tests to: %s" % exportpath)
156
157
158def testimage_main(d):
159 import unittest
160 import os
161 import oeqa.runtime
162 import time
163 from oeqa.oetest import loadTests, runTests
164 from oeqa.targetcontrol import get_target_controller
165
166 pn = d.getVar("PN", True)
167 export = oe.utils.conditional("TEST_EXPORT_ONLY", "1", True, False, d)
168 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
169 if export:
170 bb.utils.remove(d.getVar("TEST_EXPORT_DIR", True), recurse=True)
171 bb.utils.mkdirhier(d.getVar("TEST_EXPORT_DIR", True))
172
173 # tests in TEST_SUITES become required tests
174 # they won't be skipped even if they aren't suitable for an image (like xorg for minimal)
175 # testslist is what we'll actually pass to the unittest loader
176 testslist = get_tests_list(d)
177 testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"]
178
179 # the robot dance
180 target = get_target_controller(d)
181
182 class TestContext(object):
183 def __init__(self):
184 self.d = d
185 self.testslist = testslist
186 self.testsrequired = testsrequired
187 self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
188 self.target = target
189 self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
190 self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
191 manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
192 try:
193 with open(manifest) as f:
194 self.pkgmanifest = f.read()
195 except IOError as e:
196 bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
197
198 # test context
199 tc = TestContext()
200
201 # this is a dummy load of tests
202 # we are doing that to find compile errors in the tests themselves
203 # before booting the image
204 try:
205 loadTests(tc)
206 except Exception as e:
207 import traceback
208 bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
209
210 target.deploy()
211
212 try:
213 target.start()
214 if export:
215 exportTests(d,tc)
216 else:
217 starttime = time.time()
218 result = runTests(tc)
219 stoptime = time.time()
220 if result.wasSuccessful():
221 bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
222 msg = "%s - OK - All required tests passed" % pn
223 skipped = len(result.skipped)
224 if skipped:
225 msg += " (skipped=%d)" % skipped
226 bb.plain(msg)
227 else:
228 raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
229 finally:
230 target.stop()
231
232testimage_main[vardepsexclude] =+ "BB_ORIGENV"
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
new file mode 100644
index 0000000000..6984efd1be
--- /dev/null
+++ b/meta/classes/tinderclient.bbclass
@@ -0,0 +1,368 @@
1def tinder_http_post(server, selector, content_type, body):
2 import httplib
3 # now post it
4 for i in range(0,5):
5 try:
6 h = httplib.HTTP(server)
7 h.putrequest('POST', selector)
8 h.putheader('content-type', content_type)
9 h.putheader('content-length', str(len(body)))
10 h.endheaders()
11 h.send(body)
12 errcode, errmsg, headers = h.getreply()
13 #print errcode, errmsg, headers
14 return (errcode,errmsg, headers, h.file)
15 except:
16 print "Error sending the report!"
17 # try again
18 pass
19
20 # return some garbage
21 return (-1, "unknown", "unknown", None)
22
23def tinder_form_data(bound, dict, log):
24 output = []
25 # for each key in the dictionary
26 for name in dict:
27 assert dict[name]
28 output.append( "--" + bound )
29 output.append( 'Content-Disposition: form-data; name="%s"' % name )
30 output.append( "" )
31 output.append( dict[name] )
32 if log:
33 output.append( "--" + bound )
34 output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
35 output.append( '' )
36 output.append( log )
37 output.append( '--' + bound + '--' )
38 output.append( '' )
39
40 return "\r\n".join(output)
41
42def tinder_time_string():
43 """
44 Return the time as GMT (currently a stub that returns an empty string)
45 """
46 return ""
47
48def tinder_format_http_post(d,status,log):
49 """
50 Format the Tinderbox HTTP post with the data needed
51 for the tinderbox to be happy.
52 """
53
54 import random
55
56 # the variables we will need to send on this form post
57 variables = {
58 "tree" : d.getVar('TINDER_TREE', True),
59 "machine_name" : d.getVar('TINDER_MACHINE', True),
60 "os" : os.uname()[0],
61 "os_version" : os.uname()[2],
62 "compiler" : "gcc",
63 "clobber" : d.getVar('TINDER_CLOBBER', True) or "0",
64 "srcdate" : d.getVar('SRCDATE', True),
65 "PN" : d.getVar('PN', True),
66 "PV" : d.getVar('PV', True),
67 "PR" : d.getVar('PR', True),
68 "FILE" : d.getVar('FILE', True) or "N/A",
69 "TARGETARCH" : d.getVar('TARGET_ARCH', True),
70 "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown",
71 "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown",
72 "MACHINE" : d.getVar('MACHINE', True) or "Unknown",
73 "DISTRO" : d.getVar('DISTRO', True) or "Unknown",
74 "zecke-rocks" : "sure",
75 }
76
77 # optionally add the status
78 if status:
79 variables["status"] = str(status)
80
81 # try to load the machine id
82 # we only need it for build_status.pl but sending it
83 # always does not hurt
84 try:
85 f = file(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
86 id = f.read()
87 variables['machine_id'] = id
88 except:
89 pass
90
91 # the boundary we will need
92 boundary = "----------------------------------%d" % int(random.random()*1000000000000)
93
94 # now format the body
95 body = tinder_form_data( boundary, variables, log )
96
97 return ("multipart/form-data; boundary=%s" % boundary),body
98
99
100def tinder_build_start(d):
101 """
102 Inform the tinderbox that a build is starting. We do this
103 by posting our name and tree to the build_start.pl script
104 on the server.
105 """
106
107 # get the body and type
108 content_type, body = tinder_format_http_post(d,None,None)
109 server = d.getVar('TINDER_HOST', True )
110 url = d.getVar('TINDER_URL', True )
111
112 selector = url + "/xml/build_start.pl"
113
114 #print "selector %s and url %s" % (selector, url)
115
116 # now post it
117 errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
118 #print errcode, errmsg, headers
119 report = h_file.read()
120
121 # now let us find the machine id that was assigned to us
122 search = "<machine id='"
123 report = report[report.find(search)+len(search):]
124 report = report[0:report.find("'")]
125
126 bb.note("Machine ID assigned by tinderbox: %s" % report )
127
128 # now we will need to save the machine number
129 # we will override any previous numbers
130 f = file(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
131 f.write(report)
132
133
134def tinder_send_http(d, status, _log):
135 """
136 Send this log as build status
137 """
138
139 # get the body and type
140 server = d.getVar('TINDER_HOST', True)
141 url = d.getVar('TINDER_URL', True)
142
143 selector = url + "/xml/build_status.pl"
144
145 # now post it - in chunks of 18,000 characters
146 new_log = _log
147 while len(new_log) > 0:
148 content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
149 errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
150 #print errcode, errmsg, headers
151 #print h.file.read()
152 new_log = new_log[18000:]
153
154
155def tinder_print_info(d):
156 """
157 Print the TinderBox Info
158 Including information about the base system and the tree
159 we use.
160 """
161
162 # get the local vars
163 time = tinder_time_string()
164 ops = os.uname()[0]
165 version = os.uname()[2]
166 url = d.getVar( 'TINDER_URL' , True )
167 tree = d.getVar( 'TINDER_TREE', True )
168 branch = d.getVar( 'TINDER_BRANCH', True )
169 srcdate = d.getVar( 'SRCDATE', True )
170 machine = d.getVar( 'MACHINE', True )
171 distro = d.getVar( 'DISTRO', True )
172 bbfiles = d.getVar( 'BBFILES', True )
173 tarch = d.getVar( 'TARGET_ARCH', True )
174 fpu = d.getVar( 'TARGET_FPU', True )
175 oerev = d.getVar( 'OE_REVISION', True ) or "unknown"
176
177 # there is a bug with triple-quoted strings;
178 # work around it here, but the original
179 # bug should be fixed as well
180 output = []
181 output.append("== Tinderbox Info" )
182 output.append("Time: %(time)s" )
183 output.append("OS: %(ops)s" )
184 output.append("%(version)s" )
185 output.append("Compiler: gcc" )
186 output.append("Tinderbox Client: 0.1" )
187 output.append("Tinderbox Client Last Modified: yesterday" )
188 output.append("Tinderbox Protocol: 0.1" )
189 output.append("URL: %(url)s" )
190 output.append("Tree: %(tree)s" )
191 output.append("Config:" )
192 output.append("branch = '%(branch)s'" )
193 output.append("TARGET_ARCH = '%(tarch)s'" )
194 output.append("TARGET_FPU = '%(fpu)s'" )
195 output.append("SRCDATE = '%(srcdate)s'" )
196 output.append("MACHINE = '%(machine)s'" )
197 output.append("DISTRO = '%(distro)s'" )
198 output.append("BBFILES = '%(bbfiles)s'" )
199 output.append("OEREV = '%(oerev)s'" )
200 output.append("== End Tinderbox Client Info" )
201
202 # now create the real output
203 return "\n".join(output) % vars()
204
205
206def tinder_print_env():
207 """
208 Print the environment variables of this build
209 """
210 time_start = tinder_time_string()
211 time_end = tinder_time_string()
212
213 # build the environment
214 env = ""
215 for var in os.environ:
216 env += "%s=%s\n" % (var, os.environ[var])
217
218 output = []
219 output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
220 output.append( env )
221 output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
222
223 return "\n".join(output) % vars()
224
225def tinder_tinder_start(d, event):
226 """
227 PRINT the configuration of this build
228 """
229
230 time_start = tinder_time_string()
231 config = tinder_print_info(d)
232 #env = tinder_print_env()
233 time_end = tinder_time_string()
234 packages = " ".join( event.getPkgs() )
235
236 output = []
237 output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
238 output.append( config )
239 #output.append( env )
240 output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
241 output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
242 output.append( "<--- TINDERBOX STARTING BUILD NOW" )
243
244 output.append( "" )
245
246 return "\n".join(output) % vars()
247
248def tinder_do_tinder_report(event):
249 """
250 Report to the tinderbox:
251 On the BuildStart we will inform the box directly
252 On the other events we will write to the TINDER_LOG and
253 when the Task is finished we will send the report.
254
255 The above is not yet fully implemented. Currently we send
256 information immediately. The caching/queuing needs to be
257 implemented. Also sending more or less information is not
258 implemented yet.
259
260 We have two temporary files stored in the TMP directory. One file
261 contains the machine id assigned to the tinderclient; this id gets
262 assigned when we connect to the box and start the build. The second
263 file is used to work around an EventHandler limitation: if BitBake is run
264 with the continue option we want the build to fail even if we get the
265 BuildCompleted event, so we have to look up the status and
266 send it instead of 100/success.
267 """
268 import glob
269
270 # variables
271 name = bb.event.getName(event)
272 log = ""
273 status = 1
274 # Check what we need to do; the Build* events show whether we are starting or done
275 if name == "BuildStarted":
276 tinder_build_start(event.data)
277 log = tinder_tinder_start(event.data,event)
278
279 try:
280 # truncate the tinder log file
281 f = file(event.data.getVar('TINDER_LOG', True), 'w')
282 f.write("")
283 f.close()
284 except:
285 pass
286
287 try:
288 # write a status to the file. This is needed for the -k option
289 # of BitBake
290 g = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
291 g.write("")
292 g.close()
293 except IOError:
294 pass
295
296 # Append the Task-Log (compile,configure...) to the log file
297 # we will send to the server
298 if name == "TaskSucceeded" or name == "TaskFailed":
299 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
300
301 if len(log_file) != 0:
302 to_file = event.data.getVar('TINDER_LOG', True)
303 log += "".join(open(log_file[0], 'r').readlines())
304
305 # set the right 'HEADER'/Summary for the TinderBox
306 if name == "TaskStarted":
307 log += "---> TINDERBOX Task %s started\n" % event.task
308 elif name == "TaskSucceeded":
309 log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
310 elif name == "TaskFailed":
311 log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
312 elif name == "PkgStarted":
313 log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True)
314 elif name == "PkgSucceeded":
315 log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True)
316 elif name == "PkgFailed":
317 if event.data.getVar('TINDER_AUTOBUILD', True) != "0":
318 bb.build.exec_task('do_clean', event.data)
319 log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
320 status = 200
321 # remember the failure for the -k case
322 h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
323 h.write("200")
324 elif name == "BuildCompleted":
325 log += "Build Completed\n"
326 status = 100
327 # Check if we have a old status...
328 try:
329 h = file(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
330 status = int(h.read())
331 except:
332 pass
333
334 elif name == "MultipleProviders":
335 log += "---> TINDERBOX Multiple Providers\n"
336 log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
337 log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
338 log += "is runtime: %d\n" % event.isRuntime()
339 log += "<--- TINDERBOX Multiple Providers\n"
340 elif name == "NoProvider":
341 log += "Error: No Provider for: %s\n" % event.getItem()
342 log += "Error:Was Runtime: %d\n" % event.isRuntime()
343 status = 200
344 # remember the failure for the -k case
345 h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
346 h.write("200")
347
348 # now post the log
349 if len(log) == 0:
350 return
351
352 # for now we will use the http post method as it is the only one
353 log_post_method = tinder_send_http
354 log_post_method(event.data, status, log)
355
356
357# we want to be an event handler
358addhandler tinderclient_eventhandler
359python tinderclient_eventhandler() {
360 if e.data is None or bb.event.getName(e) == "MsgNote":
361 return
362
363 do_tinder_report = e.data.getVar('TINDER_REPORT', True)
364 if do_tinder_report and do_tinder_report == "1":
365 tinder_do_tinder_report(e)
366
367 return
368}
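For illustration, reporting would typically be switched on from local.conf. Only TINDER_REPORT and TINDER_AUTOBUILD are consulted by the code above; the snippet below is a sketch, and the values shown are assumptions about a typical setup:

    # local.conf (sketch)
    INHERIT += "tinderclient"
    TINDER_REPORT = "1"
    # any value other than "0" keeps the default behaviour of
    # cleaning a package after a failure
    TINDER_AUTOBUILD = "1"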
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
new file mode 100644
index 0000000000..4244b2ca7f
--- /dev/null
+++ b/meta/classes/toaster.bbclass
@@ -0,0 +1,331 @@
1#
2# Toaster helper class
3#
4# Copyright (C) 2013 Intel Corporation
5#
6# Released under the MIT license (see COPYING.MIT)
7#
8# This bbclass is designed to extract data used by OE-Core during the build process,
9# for recording in the Toaster system.
10# The data access is synchronous, preserving the build data integrity across
11# different builds.
12#
13# The data is transferred through the event system, using the MetadataEvent objects.
14#
15# The model is to enable the datadump functions as postfuncs, and have the dump
16# executed after the real taskfunc has been executed. This prevents the task signatures from
17# changing whether toaster is enabled or not. Build performance is not affected if Toaster is not enabled.
18#
19# To enable, use INHERIT in local.conf:
20#
21# INHERIT += "toaster"
22#
23#
24#
25#
26
27# Find and dump layer info once the layers have been parsed
28
29
30
31python toaster_layerinfo_dumpdata() {
32 import subprocess
33
34 def _get_git_branch(layer_path):
35 branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
36 branch = branch.replace('refs/heads/', '').rstrip()
37 return branch
38
39 def _get_git_revision(layer_path):
40 revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
41 return revision
42
43 def _get_url_map_name(layer_name):
44 """ Some layers have a different name on openembedded.org site,
45 this method returns the correct name to use in the URL
46 """
47
48 url_name = layer_name
49 url_mapping = {'meta': 'openembedded-core'}
50
51 for key in url_mapping.keys():
52 if key == layer_name:
53 url_name = url_mapping[key]
54
55 return url_name
56
57 def _get_layer_version_information(layer_path):
58
59 layer_version_info = {}
60 layer_version_info['branch'] = _get_git_branch(layer_path)
61 layer_version_info['commit'] = _get_git_revision(layer_path)
62 layer_version_info['priority'] = 0
63
64 return layer_version_info
65
66
67 def _get_layer_dict(layer_path):
68
69 layer_info = {}
70 layer_name = layer_path.split('/')[-1]
71 layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
72 layer_url_name = _get_url_map_name(layer_name)
73
74 layer_info['name'] = layer_name
75 layer_info['local_path'] = layer_path
76 layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
77 layer_info['version'] = _get_layer_version_information(layer_path)
78
79 return layer_info
80
81
82 bblayers = e.data.getVar("BBLAYERS", True)
83
84 llayerinfo = {}
85
86 for layer in { l for l in bblayers.strip().split(" ") if len(l) }:
87 llayerinfo[layer] = _get_layer_dict(layer)
88
89
90 bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data)
91}
92
93# Dump package file info data
94
95def _toaster_load_pkgdatafile(dirpath, filepath):
96 import json
97 import re
98 pkgdata = {}
99 with open(os.path.join(dirpath, filepath), "r") as fin:
100 for line in fin:
101 try:
102 kn, kv = line.strip().split(": ", 1)
103 m = re.match(r"^PKG_([^A-Z:]*)", kn)
104 if m:
105 pkgdata['OPKGN'] = m.group(1)
106 kn = "_".join([x for x in kn.split("_") if x.isupper()])
107 pkgdata[kn] = kv.strip()
108 if kn == 'FILES_INFO':
109 pkgdata[kn] = json.loads(kv)
110
111 except ValueError:
112 pass # ignore lines without valid key: value pairs
113 return pkgdata
114
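# A sketch of the pkgdata file format consumed above; the keys and values are
# hypothetical, but follow the "key: value" layout the files use:
#
#   PKG_foo: foo
#   PKGSIZE_foo: 12345
#   FILES_INFO: {"/usr/bin/foo": 4096}
#
# "PKG_<name>" records the original package name in OPKGN, per-package keys
# such as PKGSIZE_foo collapse to their upper-case stem (PKGSIZE), and
# FILES_INFO is decoded from JSON.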
115
116python toaster_package_dumpdata() {
117 """
118 Dumps the data created by emit_pkgdata
119 """
120 # replicate variables from the package.bbclass
121
122 packages = d.getVar('PACKAGES', True)
123 pkgdest = d.getVar('PKGDEST', True)
124
125 pkgdatadir = d.getVar('PKGDESTWORK', True)
126
127 # scan and send data for each package
128
129 lpkgdata = {}
130 for pkg in packages.split():
131
132 lpkgdata = _toaster_load_pkgdatafile(pkgdatadir + "/runtime/", pkg)
133
134 # Fire an event containing the pkg data
135 bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
136}
137
138# Dump output image file information
139
140python toaster_image_dumpdata() {
141 """
142 Image filenames for output images are not standardized.
143 image_types.bbclass will spell out IMAGE_CMD_xxx variables that actually
144 have hardcoded ways to create image file names in them.
145 So we look for files starting with the configured image name.
146 """
147
148 deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE', True)
149 image_name = d.getVar('IMAGE_NAME', True)
150
151 image_info_data = {}
152
153 for dirpath, dirnames, filenames in os.walk(deploy_dir_image):
154 for fn in filenames:
155 if fn.startswith(image_name):
156 image_output = os.path.join(dirpath, fn)
157 image_info_data[image_output] = os.stat(image_output).st_size
158
159 bb.event.fire(bb.event.MetadataEvent("ImageFileSize", image_info_data), d)
160}
161
162
163
164# collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data
165
166python toaster_collect_task_stats() {
167 import bb.build
168 import bb.event
169 import bb.data
170 import bb.utils
171 import os
172
173 if not e.data.getVar('BUILDSTATS_BASE', True):
174 return # if we don't have buildstats, we cannot collect stats
175
176 def _append_read_list(v):
177 lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)
178
179 with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "a") as fout:
180 bn = get_bn(e)  # get_bn is provided by buildstats.bbclass
181 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
182 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
183 fout.write("%s:%s:%s:%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))
184
185 bb.utils.unlockfile(lock)
186
187 def _read_stats(filename):
188 cpu_usage = 0
189 disk_io = 0
190 startio = ''
191 endio = ''
192 pn = ''
193 taskname = ''
194 statinfo = {}
195
196 with open(filename, 'r') as task_bs:
197 for line in task_bs.readlines():
198 k,v = line.strip().split(": ", 1)
199 statinfo[k] = v
200
201 try:
202 cpu_usage = statinfo["CPU usage"]
203 endio = statinfo["EndTimeIO"]
204 startio = statinfo["StartTimeIO"]
205 except KeyError:
206 pass # we may have incomplete data here
207
208 if startio and endio:
209 disk_io = int(endio.strip('\n ')) - int(startio.strip('\n '))
210
211 if cpu_usage:
212 cpu_usage = float(cpu_usage.strip('% \n'))
213
214 return {'cpu_usage': cpu_usage, 'disk_io': disk_io}
215
216
217 if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
218 _append_read_list(e)
220
221
222 if isinstance(e, bb.event.BuildCompleted) and os.path.exists(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")):
223 events = []
224 with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "r") as fin:
225 for line in fin:
226 (taskfile, taskname, filename, recipename) = line.strip().split(":")
227 events.append((taskfile, taskname, _read_stats(filename), recipename))
228 bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
229 os.unlink(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"))
230}
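# Each toasterstatlist record written by _append_read_list above has the form
# "taskfile:taskname:statfile:recipename", e.g. (paths hypothetical):
#   /path/meta/recipes-foo/foo_1.0.bb:do_compile:/path/tmp/buildstats/<bn>/foo-1.0-r0/do_compile:foo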
231
232# dump relevant build history data as an event when the build is completed
233
234python toaster_buildhistory_dump() {
235 import re
236 BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
237 BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
238 pkgdata_dir = e.data.getVar("PKGDATA_DIR", True)
239
240
241 # scan the build targets for this build
242 images = {}
243 allpkgs = {}
244 files = {}
245 for target in e._pkgs:
246 installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
247 if os.path.exists(installed_img_path):
248 images[target] = {}
249 files[target] = {}
250 files[target]['dirs'] = []
251 files[target]['syms'] = []
252 files[target]['files'] = []
253 with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
254 for line in fin:
255 line = line.rstrip(";")
256 psize, px = line.split("\t")
257 punit, pname = px.split(" ")
258 # this size is "installed-size" as it measures how much space it takes on disk
259 images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
260
261 with open("%s/depends.dot" % installed_img_path, "r") as fin:
262 p = re.compile(r' -> ')
263 dot = re.compile(r'.*style=dotted')
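# Illustrative depends.dot lines parsed below (package names hypothetical):
#   "busybox" -> "libc6";
#   "busybox" -> "busybox-syslog" [style=dotted];
# Dotted edges are recorded as "recommends", solid edges as "depends".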
264 for line in fin:
265 line = line.rstrip(';')
266 linesplit = p.split(line)
267 if len(linesplit) == 2:
268 pname = linesplit[0].rstrip('"').strip('"')
269 dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"')
270 deptype = "depends"
271 if dot.match(line):
272 deptype = "recommends"
273 if not pname in images[target]:
274 images[target][pname] = {'size': 0, 'depends' : []}
275 if not dependsname in images[target]:
276 images[target][dependsname] = {'size': 0, 'depends' : []}
277 images[target][pname]['depends'].append((dependsname, deptype))
278
279 with open("%s/files-in-image.txt" % installed_img_path, "r") as fin:
280 for line in fin:
281 lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
282 if lc[0].startswith("l"):
283 files[target]['syms'].append(lc)
284 elif lc[0].startswith("d"):
285 files[target]['dirs'].append(lc)
286 else:
287 files[target]['files'].append(lc)
288
289 for pname in images[target]:
290 if not pname in allpkgs:
291 try:
292 pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname)
293 except IOError as err:
294 if err.errno == 2:
295 # We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime
296 continue
297 else:
298 raise
299 allpkgs[pname] = pkgdata
300
301
302 data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files }
303
304 bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data)
305
306}
307
308# dump information related to license manifest path
309
310python toaster_licensemanifest_dump() {
311 deploy_dir = d.getVar('DEPLOY_DIR', True)
312 image_name = d.getVar('IMAGE_NAME', True)
313
314 data = { 'deploy_dir' : deploy_dir, 'image_name' : image_name }
315
316 bb.event.fire(bb.event.MetadataEvent("LicenseManifestPath", data), d)
317}
318
319# set event handlers
320addhandler toaster_layerinfo_dumpdata
321toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted"
322
323addhandler toaster_collect_task_stats
324toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed"
325
326addhandler toaster_buildhistory_dump
327toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
328do_package[postfuncs] += "toaster_package_dumpdata "
329
330do_rootfs[postfuncs] += "toaster_image_dumpdata "
331do_rootfs[postfuncs] += "toaster_licensemanifest_dump "
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
new file mode 100644
index 0000000000..b9f2aea930
--- /dev/null
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -0,0 +1,116 @@
1inherit siteinfo kernel-arch
2
3# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
4# doesn't always match our expectations... but we default to the stock value
5REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
6
7# This function creates an environment-setup-script for use in a deployable SDK
8toolchain_create_sdk_env_script () {
9 # Create environment setup script
10 libdir=${4:-${libdir}}
11 sysroot=${3:-${SDKTARGETSYSROOT}}
12 multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
13 script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
14 rm -f $script
15 touch $script
16 echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
17 echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}:$PATH' >> $script
18 echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
19 echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
20 echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
21 echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
22 echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
23 echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
24 echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script
25
26 toolchain_shared_env_script
27}
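#
# For illustration, an SDK user would source the generated script before
# configuring a project; the installation path below is hypothetical:
#
#   . /opt/sdk/environment-setup-armv5te-oe-linux-gnueabi
#   ./configure $CONFIGURE_FLAGS && make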
28
29# This function creates an environment-setup-script in the TMPDIR which enables
30# an OE-core IDE to integrate with the build tree
31toolchain_create_tree_env_script () {
32 script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
33 rm -f $script
34 touch $script
35 echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
36 echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
37 echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
38 echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
39 echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
40 echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
41 echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
42 echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
43
44 toolchain_shared_env_script
45}
46
47toolchain_shared_env_script () {
48 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
49 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
50 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
51 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
52 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
53 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
54 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
55 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
56 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
57 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
58 echo 'export AR=${TARGET_PREFIX}ar' >> $script
59 echo 'export NM=${TARGET_PREFIX}nm' >> $script
60 echo 'export M4=m4' >> $script
61 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
62 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
63 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
64 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
65 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
66 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
67 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
68 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
69 echo 'export ARCH=${ARCH}' >> $script
70 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
71}
72
73# We get the cached site config at runtime
74TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
75TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
76TOOLCHAIN_NEED_CONFIGSITE_CACHE = "${TCLIBC} ncurses"
77
78# This function creates a site config file
79toolchain_create_sdk_siteconfig () {
80 local siteconfig=$1
81
82 rm -f $siteconfig
83 touch $siteconfig
84
85 for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
86 cat $sitefile >> $siteconfig
87 done
88
89 #get cached site config
90 for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
91 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
92 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
93 fi
94 done
95}
96# The immediate expansion above can result in unwanted path dependencies here
97toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
98
99# This function creates a version information file
100toolchain_create_sdk_version () {
101 local versionfile=$1
102 rm -f $versionfile
103 touch $versionfile
104 echo 'Distro: ${DISTRO}' >> $versionfile
105 echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
106 echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
107 echo 'Timestamp: ${DATETIME}' >> $versionfile
108}
109toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
110
111python __anonymous () {
112 deps = ""
113 for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
114 deps += " %s:do_populate_sysroot" % dep
115 d.appendVarFlag('do_configure', 'depends', deps)
116}
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
new file mode 100644
index 0000000000..72da932232
--- /dev/null
+++ b/meta/classes/typecheck.bbclass
@@ -0,0 +1,12 @@
1# Check types of bitbake configuration variables
2#
3# See oe.types for details.
4
5python check_types() {
6 import oe.data, oe.types  # typed_value lives in oe.data
7 for key in e.data.keys():
8 if e.data.getVarFlag(key, "type"):
9 oe.data.typed_value(key, e.data)
10}
11addhandler check_types
12check_types[eventmask] = "bb.event.ConfigParsed"
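For illustration, a variable opts in to checking by carrying a "type" varflag; the variable name below is hypothetical:

    # conf/local.conf (sketch)
    MY_THREAD_COUNT = "4"
    MY_THREAD_COUNT[type] = "integer"

With this class inherited, a non-integer value would be rejected as soon as ConfigParsed fires.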
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
new file mode 100644
index 0000000000..8ac1b71bc2
--- /dev/null
+++ b/meta/classes/uboot-config.bbclass
@@ -0,0 +1,61 @@
1# Handle U-Boot config for a machine
2#
3# The format to specify it, in the machine, is:
4#
5# UBOOT_CONFIG ??= "<default>"
6# UBOOT_CONFIG[foo] = "config,images"
7#
8# or
9#
10# UBOOT_MACHINE = "config"
11#
12# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
13
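# For example, a machine configuration might contain (board names hypothetical):
#
#   UBOOT_CONFIG ??= "sd"
#   UBOOT_CONFIG[sd] = "myboard_sd_config,sdcard"
#   UBOOT_CONFIG[nand] = "myboard_nand_config,ubifs"
#
# which would set UBOOT_MACHINE to "myboard_sd_config" and append "sdcard"
# to IMAGE_FSTYPES.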
14python () {
15 ubootmachine = d.getVar("UBOOT_MACHINE", True)
16 ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
17 # The "doc" varflag is special, we don't want to see it here
18 ubootconfigflags.pop('doc', None)
19
20 if not ubootmachine and not ubootconfigflags:
21 PN = d.getVar("PN", True)
22 FILE = os.path.basename(d.getVar("FILE", True))
23 bb.debug(1, "To build %s, see %s for instructions on \
24 setting up your machine config" % (PN, FILE))
25 raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE", True))
26
27 if ubootmachine and ubootconfigflags:
28 raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
29
30 if not ubootconfigflags:
31 return
32
33 ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
34 if len(ubootconfig) > 1:
35 raise bb.parse.SkipPackage('You can only have a single default for UBOOT_CONFIG.')
36 elif len(ubootconfig) == 0:
37 raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
38 ubootconfig = ubootconfig[0]
39
40 for f, v in ubootconfigflags.items():
41 if f == 'defaultval':
42 continue
43
44 items = v.split(',')
45 if items[0] and len(items) > 2:
46 raise bb.parse.SkipPackage('Only config,images can be specified!')
47
48 if ubootconfig == f:
49 bb.debug(1, "Setting UBOOT_MACHINE to %s." % items[0])
50 d.setVar('UBOOT_MACHINE', items[0])
51
52 # IMAGE_FSTYPES appending
53 if len(items) > 1 and items[1]:
54 bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
55 d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
56
57 # Go out as we found a match!
58 break
59 else:
60 raise bb.parse.SkipPackage("UBOOT_CONFIG %s is not supported" % ubootconfig)
61}
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
new file mode 100644
index 0000000000..9f2c250d03
--- /dev/null
+++ b/meta/classes/update-alternatives.bbclass
@@ -0,0 +1,267 @@
1# This class is used to help the alternatives system which is useful when
2# multiple sources provide same command. You can use update-alternatives
3# command directly in your recipe, but in most cases this class simplifies
4# that job.
5#
6# To use this class a number of variables should be defined:
7#
8# List all of the alternatives needed by a package:
9# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
10#
11# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
12#
13# The pathname of the link
14# ALTERNATIVE_LINK_NAME[name] = "target"
15#
16# This is the name of the binary once it's been installed onto the runtime.
17# This name is global to all split packages in this recipe, and should match
18# other recipes with the same functionality.
19# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
20#
21# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
22#
23# The default link to create for all targets
24# ALTERNATIVE_TARGET = "target"
25#
26# This is useful in a multicall binary case
27# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
28#
29# A non-default link to create for a target
30# ALTERNATIVE_TARGET[name] = "target"
31#
32# This is the name of the binary as it's been installed by do_install
33# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
34#
35# A package specific link for a target
36# ALTERNATIVE_TARGET_<pkg>[name] = "target"
37#
38# This is useful when a recipe provides multiple alternatives for the
39# same item.
40#
41# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
42# from ALTERNATIVE_LINK_NAME.
43#
44# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
45# ALTERNATIVE_TARGET will have '.${BPN}' appended to it. If the file
46# referenced has not been renamed, it will also be renamed. (This avoids
47# the need to rename alternative files in the do_install step, but still
48# supports it if necessary for some reason.)
49#
50# The default priority for any alternatives
51# ALTERNATIVE_PRIORITY = "priority"
52#
53# i.e. default is ALTERNATIVE_PRIORITY = "10"
54#
55# The non-default priority for a specific target
56# ALTERNATIVE_PRIORITY[name] = "priority"
57#
58# The package priority for a specific target
59# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
60
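#
# A compact example for a multicall binary (a sketch; values are illustrative):
#
#   ALTERNATIVE_busybox = "sh"
#   ALTERNATIVE_LINK_NAME[sh] = "/bin/sh"
#   ALTERNATIVE_TARGET[sh] = "/bin/busybox"
#   ALTERNATIVE_PRIORITY = "50"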
61ALTERNATIVE_PRIORITY = "10"
62
63# We need special processing for vardeps because it cannot work on
64# modified flag values. So we aggregate the flags into a new variable
65# and include that variable in the set.
66UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
67
68def gen_updatealternativesvardeps(d):
69 pkgs = (d.getVar("PACKAGES", True) or "").split()
70 vars = (d.getVar("UPDALTVARS", True) or "").split()
71
72 # First compute them for non_pkg versions
73 for v in vars:
74 for flag in (d.getVarFlags(v) or {}):
75 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
76 continue
77 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
78
79 for p in pkgs:
80 for v in vars:
81 for flag in (d.getVarFlags("%s_%s" % (v,p)) or {}):
82 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
83 continue
84 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
85
86def ua_extend_depends(d):
87 if 'virtual/update-alternatives' not in d.getVar('PROVIDES', True):
88 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
89
90python __anonymous() {
91 # Update Alternatives only works on target packages...
92 if bb.data.inherits_class('native', d) or \
93 bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
94 bb.data.inherits_class('cross-canadian', d):
95 return
96
97 # compute special vardeps
98 gen_updatealternativesvardeps(d)
99
100 # extend the depends to include virtual/update-alternatives
101 ua_extend_depends(d)
102}
103
104def gen_updatealternativesvars(d):
105 ret = []
106 pkgs = (d.getVar("PACKAGES", True) or "").split()
107 vars = (d.getVar("UPDALTVARS", True) or "").split()
108
109 for v in vars:
110 ret.append(v + "_VARDEPS")
111
112 for p in pkgs:
113 for v in vars:
114 ret.append(v + "_" + p)
115 ret.append(v + "_VARDEPS_" + p)
116 return " ".join(ret)
117
118# Now the new stuff, we use a custom function to generate the right values
119populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
120
121# We need to do the rename after the image creation step, but before
122# the split and strip steps.. packagecopy seems to be the earliest reasonable
123# place.
124python perform_packagecopy_append () {
125 # Check for deprecated usage...
126 pn = d.getVar('BPN', True)
127 if d.getVar('ALTERNATIVE_LINKS', True) != None:
128 bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
129
130 # Do actual update alternatives processing
131 pkgdest = d.getVar('PKGD', True)
132 for pkg in (d.getVar('PACKAGES', True) or "").split():
133 # If the src == dest, we know we need to rename the dest by appending ${BPN}
134 link_rename = {}
135 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
136 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
137 if not alt_link:
138 alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name)
139 d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
140
141 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
142 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
143 # Sometimes alt_target is specified as relative to the link name.
144 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
145
146 # If the link and target are the same name, we need to rename the target.
147 if alt_link == alt_target:
148 src = '%s/%s' % (pkgdest, alt_target)
149 alt_target_rename = '%s.%s' % (alt_target, pn)
150 dest = '%s/%s' % (pkgdest, alt_target_rename)
151 if os.path.lexists(dest):
152 bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
153 elif os.path.lexists(src):
154 if os.path.islink(src):
155 # Delay rename of links
156 link_rename[alt_target] = alt_target_rename
157 else:
158 bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
159 os.rename(src, dest)
160 else:
161 bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
162 continue
163 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
164
165 # Process delayed link names
166 # Do these after other renames so we can correct broken links
167 for alt_target in link_rename:
168 src = '%s/%s' % (pkgdest, alt_target)
169 dest = '%s/%s' % (pkgdest, link_rename[alt_target])
170 link = os.readlink(src)
171 link_target = oe.path.realpath(src, pkgdest, True)
172
173 if os.path.lexists(link_target):
174 # Ok, the link_target exists, we can rename
175 bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target]))
176 os.rename(src, dest)
177 else:
178 # Try to resolve the broken link to link.${BPN}
179 link_maybe = '%s.%s' % (os.readlink(src), pn)
180 if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
181 # Ok, the renamed link target exists.. create a new link, and remove the original
182 bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe))
183 os.symlink(link_maybe, dest)
184 os.unlink(src)
185 else:
186 bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
187}
188
189PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
190
191python populate_packages_updatealternatives () {
192 pn = d.getVar('BPN', True)
193
194 # Do actual update alternatives processing
195 pkgdest = d.getVar('PKGD', True)
196 for pkg in (d.getVar('PACKAGES', True) or "").split():
197 # Create post install/removal scripts
198 alt_setup_links = ""
199 alt_remove_links = ""
200 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
201 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
202 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
203 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
204 # Sometimes alt_target is specified as relative to the link name.
205 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
206
207 alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True)
208 alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True)
209
210 # This shouldn't trigger, as it should have been resolved earlier!
211 if alt_link == alt_target:
212 bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
213 alt_target = '%s.%s' % (alt_target, pn)
214
215 if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
216 bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
217 continue
218
219 # Default is to generate a shell script; eventually we may want to change this...
220 alt_target = os.path.normpath(alt_target)
221
222 alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
223 alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
224
225 if alt_setup_links:
226 # RDEPENDS setup
227 provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True)
228 if provider:
229 #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
230 d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', True) + provider)
231
232 bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg)
233 bb.note('%s' % alt_setup_links)
234 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
235 postinst += alt_setup_links
236 d.setVar('pkg_postinst_%s' % pkg, postinst)
237
238 bb.note('%s' % alt_remove_links)
239 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or '#!/bin/sh\n'
240 postrm += alt_remove_links
241 d.setVar('pkg_postrm_%s' % pkg, postrm)
242}
243
244python package_do_filedeps_append () {
245 pn = d.getVar('BPN', True)
246 pkgdest = d.getVar('PKGDEST', True)
247
248 for pkg in packages.split():  # 'packages' is defined by package_do_filedeps in package.bbclass, which this append extends
249 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
250 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
251 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
252 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
253
254 if alt_link == alt_target:
255 bb.warn('alt_link == alt_target: %s == %s' % (alt_link, alt_target))
256 alt_target = '%s.%s' % (alt_target, pn)
257
258 if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
259 continue
260
261 # Add file provide
262 trans_target = oe.package.file_translate(alt_target)
263 d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
264 if trans_target not in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""):
265 d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
266}
267
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
new file mode 100644
index 0000000000..0ac2af7d97
--- /dev/null
+++ b/meta/classes/update-rc.d.bbclass
@@ -0,0 +1,130 @@
1UPDATERCPN ?= "${PN}"
2
3DEPENDS_append = " update-rc.d-native"
4UPDATERCD = "update-rc.d"
5UPDATERCD_class-cross = ""
6UPDATERCD_class-native = ""
7UPDATERCD_class-nativesdk = ""
8
9RRECOMMENDS_${UPDATERCPN}_append = " ${UPDATERCD}"
10
11INITSCRIPT_PARAMS ?= "defaults"
12
13INIT_D_DIR = "${sysconfdir}/init.d"
14
15updatercd_preinst() {
16if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
17 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
18fi
19if type update-rc.d >/dev/null 2>/dev/null; then
20 if [ -n "$D" ]; then
21 OPT="-f -r $D"
22 else
23 OPT="-f"
24 fi
25 update-rc.d $OPT ${INITSCRIPT_NAME} remove
26fi
27}
28
29updatercd_postinst() {
30if type update-rc.d >/dev/null 2>/dev/null; then
31 if [ -n "$D" ]; then
32 OPT="-r $D"
33 else
34 OPT="-s"
35 fi
36 update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
37fi
38}
39
40updatercd_prerm() {
41if [ -z "$D" ]; then
42 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
43fi
44}
45
46updatercd_postrm() {
47if type update-rc.d >/dev/null 2>/dev/null; then
48 if [ -n "$D" ]; then
49 OPT="-r $D"
50 else
51 OPT=""
52 fi
53 update-rc.d $OPT ${INITSCRIPT_NAME} remove
54fi
55}
56
57
58def update_rc_after_parse(d):
59 if d.getVar('INITSCRIPT_PACKAGES') == None:
60 if d.getVar('INITSCRIPT_NAME') == None:
61 raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE'))
62 if d.getVar('INITSCRIPT_PARAMS') == None:
63 raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE'))
64
65python __anonymous() {
66 update_rc_after_parse(d)
67}
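# For illustration, a typical recipe inheriting this class would set
# (script name and runlevels hypothetical):
#
#   INITSCRIPT_NAME = "myservice"
#   INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ."
#
# or rely on the "defaults" INITSCRIPT_PARAMS set above.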
68
69PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd "
70
71populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
72
73python populate_packages_updatercd () {
74 def update_rcd_auto_depend(pkg):
75 import subprocess
76 import os
77 path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
78 if not os.path.exists(path):
79 return
80 statement = "grep -q -w '/etc/init.d/functions' %s" % path
81 if subprocess.call(statement, shell=True) == 0:
82 d.appendVar('RDEPENDS_' + pkg, ' initscripts-functions')
83
84 def update_rcd_package(pkg):
85 bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
86
87 localdata = bb.data.createCopy(d)
88 overrides = localdata.getVar("OVERRIDES", True)
89 localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
90 bb.data.update_data(localdata)
91
92 update_rcd_auto_depend(pkg)
93
94 preinst = d.getVar('pkg_preinst_%s' % pkg, True)
95 if not preinst:
96 preinst = '#!/bin/sh\n'
97 preinst += localdata.getVar('updatercd_preinst', True)
98 d.setVar('pkg_preinst_%s' % pkg, preinst)
99
100 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
101 if not postinst:
102 postinst = '#!/bin/sh\n'
103 postinst += localdata.getVar('updatercd_postinst', True)
104 d.setVar('pkg_postinst_%s' % pkg, postinst)
105
106 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
107 if not prerm:
108 prerm = '#!/bin/sh\n'
109 prerm += localdata.getVar('updatercd_prerm', True)
110 d.setVar('pkg_prerm_%s' % pkg, prerm)
111
112 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
113 if not postrm:
114 postrm = '#!/bin/sh\n'
115 postrm += localdata.getVar('updatercd_postrm', True)
116 d.setVar('pkg_postrm_%s' % pkg, postrm)
117
118 # Check that this class isn't being inhibited (generally, by
119 # systemd.bbclass) before doing any work.
120 if oe.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
121 not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
122 pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
123 if pkgs == None:
124 pkgs = d.getVar('UPDATERCPN', True)
125 packages = (d.getVar('PACKAGES', True) or "").split()
126 if pkgs not in packages and packages != []:
127 pkgs = packages[0]
128 for pkg in pkgs.split():
129 update_rcd_package(pkg)
130}
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
new file mode 100644
index 0000000000..a89cb10a4a
--- /dev/null
+++ b/meta/classes/useradd-staticids.bbclass
@@ -0,0 +1,272 @@
1# In order to support a deterministic set of 'dynamic' users/groups,
2# we need a function to reformat the params based on a static file
3def update_useradd_static_config(d):
4 import argparse
5 import re
6
7 class myArgumentParser( argparse.ArgumentParser ):
8 def _print_message(self, message, file=None):
9 bb.warn("%s - %s: %s" % (d.getVar('PN', True), pkg, message))
10
11 # This should never be called...
12 def exit(self, status=0, message=None):
13 message = message or ("%s - %s: useradd-staticids.bbclass: Argument parsing exited" % (d.getVar('PN', True), pkg))
14 self.error(message)
15
16 def error(self, message):
17 raise bb.build.FuncFailed(message)
18
19 # We parse and rewrite the useradd components
20 def rewrite_useradd(params):
21 # The following comes from --help on useradd from shadow
22 parser = myArgumentParser(prog='useradd')
23 parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
24 parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
25 parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
26 parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
27 parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
28 parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
29 parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
30 parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
31 parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
32 parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
33 parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
34 parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_true")
35 parser.add_argument("-M", "--no-create-home", help="do not create the user's home directory", action="store_true")
36 parser.add_argument("-N", "--no-user-group", help="do not create a group with the same name as the user", action="store_true")
37 parser.add_argument("-o", "--non-unique", help="allow creating users with a duplicate (non-unique) UID", action="store_true")
38 parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
39 parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
40 parser.add_argument("-r", "--system", help="create a system account", action="store_true")
41 parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
42 parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
43 parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_true")
44 parser.add_argument("LOGIN", help="Login name of the new user")
45
46 # Return a list of configuration files based on either the default
47 # files/passwd or the contents of USERADD_UID_TABLES
48 # Paths are resolved via BBPATH.
49 def get_passwd_list(d):
50 str = ""
51 bbpath = d.getVar('BBPATH', True)
52 passwd_tables = d.getVar('USERADD_UID_TABLES', True)
53 if not passwd_tables:
54 passwd_tables = 'files/passwd'
55 for conf_file in passwd_tables.split():
56 str += " %s" % bb.utils.which(bbpath, conf_file)
57 return str
58
59 newparams = []
60 for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
61 param=param.strip()
62 try:
63 uaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
64 except:
65 raise bb.build.FuncFailed("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
66
67 # Scan the passwd tables (files/passwd or the contents of USERADD_UID_TABLES).
68 # Use the standard passwd layout:
69 # username:password:user_id:group_id:comment:home_directory:login_shell
70 # (we want to process in reverse order, as 'last found' in the list wins)
71 #
72 # If a field is left blank, the original value will be used. The 'username'
73 # field is required.
74 #
75 # Note: we ignore the password field, as including even the hashed password
76 # in the useradd command may introduce a security hole. It's assumed that
77 # all new users get the default ('*' which prevents login) until the user is
78 # specifically configured by the system admin.
79 for conf in get_passwd_list(d).split()[::-1]:
80 if os.path.exists(conf):
81 f = open(conf, "r")
82 for line in f:
83 if line.startswith('#'):
84 continue
85 field = line.rstrip().split(":")
86 if field[0] == uaargs.LOGIN:
87 if uaargs.uid and field[2] and (uaargs.uid != field[2]):
88 bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
89 uaargs.uid = [field[2], uaargs.uid][not field[2]]
90
91 # Determine the possible groupname
92 # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
93 #
94 # By default the system has creation of the matching groups enabled
95 # So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
96 # is used, and we disable the user_group option.
97 #
98 uaargs.groupname = [uaargs.gid, uaargs.LOGIN][not uaargs.gid or uaargs.user_group]
99 uaargs.groupid = [uaargs.gid, uaargs.groupname][not uaargs.gid]
100 uaargs.groupid = [field[3], uaargs.groupid][not field[3]]
101
102 if not uaargs.gid or uaargs.gid != uaargs.groupid:
103 if (uaargs.groupid and uaargs.groupid.isdigit()) and (uaargs.groupname and uaargs.groupname.isdigit()) and (uaargs.groupid != uaargs.groupname):
104 # We want to add a group, but we don't know its name... so we can't add the group...
105 # We have to assume the group has previously been added or we'll fail on the adduser...
106 # Note: specifying the actual gid is very rare in OE, usually the group name is specified.
107 bb.warn("%s: Changing gid for login %s from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupname, uaargs.gid))
108 elif (uaargs.groupid and not uaargs.groupid.isdigit()) and uaargs.groupid == uaargs.groupname:
109 # We don't have a number, so we have to add a name
110 bb.debug(1, "Adding group %s!" % (uaargs.groupname))
111 uaargs.gid = uaargs.groupid
112 uaargs.user_group = False
113 groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
114 newgroup = "%s %s" % (['', ' --system'][uaargs.system], uaargs.groupname)
115 if groupadd:
116 d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup))
117 else:
118 d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
119 elif uaargs.groupname and (uaargs.groupid and uaargs.groupid.isdigit()):
120 # We have a group name and a group number to assign it to
121 bb.debug(1, "Adding group %s gid (%s)!" % (uaargs.groupname, uaargs.groupid))
122 uaargs.gid = uaargs.groupid
123 uaargs.user_group = False
124 groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
125 newgroup = "-g %s %s" % (uaargs.gid, uaargs.groupname)
126 if groupadd:
127 d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup))
128 else:
129 d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
130
131 uaargs.comment = ["'%s'" % field[4], uaargs.comment][not field[4]]
132 uaargs.home_dir = [field[5], uaargs.home_dir][not field[5]]
133 uaargs.shell = [field[6], uaargs.shell][not field[6]]
134 break
135
136 # It is an error if USERADD_ERROR_DYNAMIC is set and no static uid was provided...
137 if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not ((uaargs.uid and uaargs.uid.isdigit()) and uaargs.gid):
138 #bb.error("Skipping recipe %s, package %s which adds username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
139 raise bb.build.FuncFailed("%s - %s: Username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
140
141 # Reconstruct the args...
142 newparam = ['', ' --defaults'][uaargs.defaults]
143 newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
144 newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
145 newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
146 newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
147 newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
148 newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
149 newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
150 newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None]
151 newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None]
152 newparam += ['', ' --no-log-init'][uaargs.no_log_init]
153 newparam += ['', ' --create-home'][uaargs.create_home]
154 newparam += ['', ' --no-create-home'][uaargs.no_create_home]
155 newparam += ['', ' --no-user-group'][uaargs.no_user_group]
156 newparam += ['', ' --non-unique'][uaargs.non_unique]
157 newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
158 newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
159 newparam += ['', ' --system'][uaargs.system]
160 newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
161 newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None]
162 newparam += ['', ' --user-group'][uaargs.user_group]
163 newparam += ' %s' % uaargs.LOGIN
164
165 newparams.append(newparam)
166
167 return " ;".join(newparams).strip()
168
169 # We parse and rewrite the groupadd components
170 def rewrite_groupadd(params):
171 # The following comes from --help on groupadd from shadow
172 parser = myArgumentParser(prog='groupadd')
173 parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
174 parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
175 parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
176 parser.add_argument("-o", "--non-unique", help="allow creating groups with a duplicate (non-unique) GID", action="store_true")
177 parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
178 parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
179 parser.add_argument("-r", "--system", help="create a system account", action="store_true")
180 parser.add_argument("GROUP", help="Group name of the new group")
181
182 # Return a list of configuration files based on either the default
183 # files/group or the contents of USERADD_GID_TABLES
184 # Paths are resolved via BBPATH.
185 def get_group_list(d):
186 str = ""
187 bbpath = d.getVar('BBPATH', True)
188 group_tables = d.getVar('USERADD_GID_TABLES', True)
189 if not group_tables:
190 group_tables = 'files/group'
191 for conf_file in group_tables.split():
192 str += " %s" % bb.utils.which(bbpath, conf_file)
193 return str
194
195 newparams = []
196 for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
197 param=param.strip()
198 try:
199 # If we're processing multiple lines, we could have left over values here...
200 gaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
201 except:
202 raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
203
204 # Need to iterate over layers and open the right file(s)
205 # Use the standard group layout:
206 # groupname:password:group_id:group_members
207 #
208 # If a field is left blank, the original value will be used. The 'groupname' field
209 # is required.
210 #
211 # Note: similar to the passwd file, the 'password' field is ignored
212 # Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
213 for conf in get_group_list(d).split()[::-1]:
214 if os.path.exists(conf):
215 f = open(conf, "r")
216 for line in f:
217 if line.startswith('#'):
218 continue
219 field = line.rstrip().split(":")
220 if field[0] == gaargs.GROUP and field[2]:
221 if gaargs.gid and (gaargs.gid != field[2]):
222 bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
223 gaargs.gid = field[2]
224 break
225
226 if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not (gaargs.gid and gaargs.gid.isdigit()):
227 #bb.error("Skipping recipe %s, package %s which adds groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
228 raise bb.build.FuncFailed("%s - %s: Groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
229
230 # Reconstruct the args...
231 newparam = ['', ' --force'][gaargs.force]
232 newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
233 newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
234 newparam += ['', ' --non-unique'][gaargs.non_unique]
235 newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
236 newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
237 newparam += ['', ' --system'][gaargs.system]
238 newparam += ' %s' % gaargs.GROUP
239
240 newparams.append(newparam)
241
242 return " ;".join(newparams).strip()
243
244 # Load and process the users and groups, rewriting the useradd/groupadd params
245 useradd_packages = d.getVar('USERADD_PACKAGES', True)
246
247 for pkg in useradd_packages.split():
248 # Groupmems doesn't have anything we might want to change, so simply validating
249 # is a bit of a waste -- only process useradd/groupadd
250 useradd_param = d.getVar('USERADD_PARAM_%s' % pkg, True)
251 if useradd_param:
252 #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
253 d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param))
254 #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg, True)))
255
256 groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg, True)
257 if groupadd_param:
258 #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
259 d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param))
260 #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg, True)))
261
262
263
264python __anonymous() {
265 if not bb.data.inherits_class('nativesdk', d) \
266 and not bb.data.inherits_class('native', d):
267 try:
268 update_useradd_static_config(d)
269 except bb.build.FuncFailed as f:
270 bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN', True), f))
271 raise bb.parse.SkipPackage(f)
272}
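For illustration, a distro wanting fully static IDs would point this class at its own tables and make dynamic allocation an error; the file names and IDs below are hypothetical:

    # distro configuration (sketch)
    USERADD_UID_TABLES = "files/passwd"
    USERADD_GID_TABLES = "files/group"
    USERADD_ERROR_DYNAMIC = "1"

    # files/passwd -- standard passwd layout; blank fields keep the recipe's values
    # username:password:uid:gid:comment:home:shell
    messagebus:*:800:800::/var/lib/dbus:/bin/false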
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
new file mode 100644
index 0000000000..3dd7a610a9
--- /dev/null
+++ b/meta/classes/useradd.bbclass
@@ -0,0 +1,211 @@
1inherit useradd_base
2
3# base-passwd-cross provides the default passwd and group files in the
4# target sysroot, and shadow-native and shadow-sysroot provide the utilities
5# and support files needed to add and modify user and group accounts
6DEPENDS_append = "${USERADDDEPENDS}"
7USERADDDEPENDS = " base-passwd shadow-native shadow-sysroot shadow"
8USERADDDEPENDS_class-cross = ""
9USERADDDEPENDS_class-native = ""
10USERADDDEPENDS_class-nativesdk = ""
11
12# This preinstall function can be run in four different contexts:
13#
14# a) Before do_install
15# b) At do_populate_sysroot_setscene when installing from sstate packages
16# c) As the preinst script in the target package at do_rootfs time
17# d) As the preinst script in the target package on device as a package upgrade
18#
19useradd_preinst () {
20OPT=""
21SYSROOT=""
22
23if test "x$D" != "x"; then
24 # Installing into a sysroot
25 SYSROOT="$D"
26 OPT="--root $D"
27fi
28
29# If we're not doing a special SSTATE/SYSROOT install
30# then set the values, otherwise use the environment
31if test "x$UA_SYSROOT" = "x"; then
32 # Installing onto a target
33 # Add groups and users defined only for this package
34 GROUPADD_PARAM="${GROUPADD_PARAM}"
35 USERADD_PARAM="${USERADD_PARAM}"
36 GROUPMEMS_PARAM="${GROUPMEMS_PARAM}"
37fi
38
39# Perform group additions first, since user additions may depend
40# on these groups existing
41if test "x$GROUPADD_PARAM" != "x"; then
42 echo "Running groupadd commands..."
43 # Invoke multiple instances of groupadd for parameter lists
44 # separated by ';'
45 opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
46 remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
47 while test "x$opts" != "x"; do
48 perform_groupadd "$SYSROOT" "$OPT $opts" 10
49 if test "x$opts" = "x$remaining"; then
50 break
51 fi
52 opts=`echo "$remaining" | cut -d ';' -f 1`
53 remaining=`echo "$remaining" | cut -d ';' -f 2-`
54 done
55fi
56
57if test "x$USERADD_PARAM" != "x"; then
58 echo "Running useradd commands..."
59 # Invoke multiple instances of useradd for parameter lists
60 # separated by ';'
61 opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
62 remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
63 while test "x$opts" != "x"; do
64 perform_useradd "$SYSROOT" "$OPT $opts" 10
65 if test "x$opts" = "x$remaining"; then
66 break
67 fi
68 opts=`echo "$remaining" | cut -d ';' -f 1`
69 remaining=`echo "$remaining" | cut -d ';' -f 2-`
70 done
71fi
72
73if test "x$GROUPMEMS_PARAM" != "x"; then
74 echo "Running groupmems commands..."
75 # Invoke multiple instances of groupmems for parameter lists
76 # separated by ';'
77 opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
78 remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
79 while test "x$opts" != "x"; do
80 perform_groupmems "$SYSROOT" "$OPT $opts" 10
81 if test "x$opts" = "x$remaining"; then
82 break
83 fi
84 opts=`echo "$remaining" | cut -d ';' -f 1`
85 remaining=`echo "$remaining" | cut -d ';' -f 2-`
86 done
87fi
88}
89
90useradd_sysroot () {
91 # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
92 # at this point, so we set up the environment explicitly so that pseudo can
93 # load even if it is not already present.
94 export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
95
96 # Explicitly set $D since it isn't set to anything
97 # before do_install
98 D=${STAGING_DIR_TARGET}
99
100 # Add groups and users defined for all recipe packages
101 GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
102 USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
103 GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
104
105 # Tell the system to use the environment vars
106 UA_SYSROOT=1
107
108 useradd_preinst
109}
110
111useradd_sysroot_sstate () {
112 if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
113 then
114 useradd_sysroot
115 fi
116}
117
118do_install[prefuncs] += "${SYSROOTFUNC}"
119SYSROOTFUNC = "useradd_sysroot"
120SYSROOTFUNC_class-cross = ""
121SYSROOTFUNC_class-native = ""
122SYSROOTFUNC_class-nativesdk = ""
123SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}"
124SYSROOTPOSTFUNC = "useradd_sysroot_sstate"
125SYSROOTPOSTFUNC_class-cross = ""
126SYSROOTPOSTFUNC_class-native = ""
127SYSROOTPOSTFUNC_class-nativesdk = ""
128
129USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
130USERADDSETSCENEDEPS_class-cross = ""
131USERADDSETSCENEDEPS_class-native = ""
132USERADDSETSCENEDEPS_class-nativesdk = ""
133do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
134do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
135
136# Recipe parse-time sanity checks
137def update_useradd_after_parse(d):
138 useradd_packages = d.getVar('USERADD_PACKAGES', True)
139
140 if not useradd_packages:
141 raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE'))
142
143 for pkg in useradd_packages.split():
144 if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
145 bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE'), pkg))
146
147python __anonymous() {
148 if not bb.data.inherits_class('nativesdk', d) \
149 and not bb.data.inherits_class('native', d):
150 update_useradd_after_parse(d)
151}
152
153# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
154# [group|user]add parameters for all USERADD_PACKAGES in this recipe
155def get_all_cmd_params(d, cmd_type):
156 import string
157
158 param_type = cmd_type.upper() + "_PARAM_%s"
159 params = []
160
161 useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
162 for pkg in useradd_packages.split():
163 param = d.getVar(param_type % pkg, True)
164 if param:
165 params.append(param)
166
167 return "; ".join(params)
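
# For illustration (hypothetical values): with USERADD_PACKAGES = "${PN} ${PN}-server"
# and a USERADD_PARAM set for each of those packages, get_all_cmd_params(d, 'useradd')
# returns the two parameter lists joined into a single string:
#
#   "<params for ${PN}>; <params for ${PN}-server>"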

# Adds the preinst script into generated packages
fakeroot python populate_packages_prepend () {
    def update_useradd_package(pkg):
        bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)

        # The useradd commands are appended to the package's existing
        # pkg_preinst, since pkg_preinst may also need to run on the target.
        # Keeping everything in a single preinst avoids the useradd commands
        # being invoked twice, which would cause unwanted warnings.
        preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
        if not preinst:
            preinst = '#!/bin/sh\n'
        preinst += 'bbnote () {\n%s}\n' % d.getVar('bbnote', True)
        preinst += 'bbwarn () {\n%s}\n' % d.getVar('bbwarn', True)
        preinst += 'bbfatal () {\n%s}\n' % d.getVar('bbfatal', True)
        preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True)
        preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True)
        preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True)
        preinst += d.getVar('useradd_preinst', True)
        d.setVar('pkg_preinst_%s' % pkg, preinst)

        # RDEPENDS setup
        rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
        rdepends += ' ' + d.getVar('MLPREFIX') + 'base-passwd'
        rdepends += ' ' + d.getVar('MLPREFIX') + 'shadow'
        # base-files is where the default /etc/skel is packaged
        rdepends += ' ' + d.getVar('MLPREFIX') + 'base-files'
        d.setVar("RDEPENDS_%s" % pkg, rdepends)

    # Add the user/group preinstall scripts and RDEPENDS requirements
    # to packages specified by USERADD_PACKAGES
    if not bb.data.inherits_class('nativesdk', d) \
        and not bb.data.inherits_class('native', d):
        useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
        for pkg in useradd_packages.split():
            update_useradd_package(pkg)
}

# Use the following to extend useradd with custom functions
USERADDEXTENSION ?= ""
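#
# For example (illustrative; useradd-staticids is the extension class carried
# in oe-core, and any other class providing the same hooks would work):
#
#   USERADDEXTENSION = "useradd-staticids"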

inherit ${USERADDEXTENSION}
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
new file mode 100644
index 0000000000..c47b1eb810
--- /dev/null
+++ b/meta/classes/useradd_base.bbclass
@@ -0,0 +1,230 @@
# This bbclass provides basic functionality for user/group settings.
# It is intended to be inherited by useradd.bbclass and
# extrausers.bbclass.

# The following functions all share the same basic logic:
# *) Perform any necessary checks before invoking the actual command
# *) Invoke the actual command, retrying if necessary
# *) Error out if the command still fails after all retries

# Note that before invoking these functions, make sure the global variable
# PSEUDO is set up correctly.

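# A typical invocation looks like the following (illustrative values; the
# final "10" is the retry count that useradd.bbclass passes):
#
#   perform_groupadd "$D" "--root $D --system hypogroup" 10
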
perform_groupadd () {
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing groupadd with [$opts], retrying up to $retries times"
    local groupname=`echo "$opts" | awk '{ print $NF }'`
    local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
    if test "x$group_exists" = "x"; then
        local count=0
        while true; do
            eval $PSEUDO groupadd $opts || true
            group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
            if test "x$group_exists" = "x"; then
                bbwarn "groupadd command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                bbfatal "Tried running groupadd command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "group $groupname already exists, not re-creating it"
    fi
}

perform_useradd () {
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing useradd with [$opts], retrying up to $retries times"
    local username=`echo "$opts" | awk '{ print $NF }'`
    local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
    if test "x$user_exists" = "x"; then
        local count=0
        while true; do
            eval $PSEUDO useradd $opts || true
            user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
            if test "x$user_exists" = "x"; then
                bbwarn "useradd command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                bbfatal "Tried running useradd command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "user $username already exists, not re-creating it"
    fi
}

perform_groupmems () {
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing groupmems with [$opts], retrying up to $retries times"
    local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
    local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
    bbnote "Running groupmems command with group $groupname and user $username"
    # groupmems fails if /etc/gshadow does not exist
    local gshadow=""
    if [ -f $rootdir${sysconfdir}/gshadow ]; then
        gshadow="yes"
    else
        gshadow="no"
        touch $rootdir${sysconfdir}/gshadow
    fi
    local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
    if test "x$mem_exists" = "x"; then
        local count=0
        while true; do
            eval $PSEUDO groupmems $opts || true
            mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
            if test "x$mem_exists" = "x"; then
                bbwarn "groupmems command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                if test "x$gshadow" = "xno"; then
                    rm -f $rootdir${sysconfdir}/gshadow
                    rm -f $rootdir${sysconfdir}/gshadow-
                fi
                bbfatal "Tried running groupmems command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "group $groupname already contains $username, not re-adding it"
    fi
    if test "x$gshadow" = "xno"; then
        rm -f $rootdir${sysconfdir}/gshadow
        rm -f $rootdir${sysconfdir}/gshadow-
    fi
}

perform_groupdel () {
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing groupdel with [$opts], retrying up to $retries times"
    local groupname=`echo "$opts" | awk '{ print $NF }'`
    local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
    if test "x$group_exists" != "x"; then
        local count=0
        while true; do
            eval $PSEUDO groupdel $opts || true
            group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
            if test "x$group_exists" != "x"; then
                bbwarn "groupdel command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                bbfatal "Tried running groupdel command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "group $groupname doesn't exist, not removing it"
    fi
}

perform_userdel () {
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing userdel with [$opts], retrying up to $retries times"
    local username=`echo "$opts" | awk '{ print $NF }'`
    local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
    if test "x$user_exists" != "x"; then
        local count=0
        while true; do
            eval $PSEUDO userdel $opts || true
            user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
            if test "x$user_exists" != "x"; then
                bbwarn "userdel command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                bbfatal "Tried running userdel command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "user $username doesn't exist, not removing it"
    fi
}

perform_groupmod () {
    # Other than checking its exit status, there is no simple way to tell
    # whether groupmod succeeded, so temporarily disable the -e option
    set +e
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing groupmod with [$opts], retrying up to $retries times"
    local groupname=`echo "$opts" | awk '{ print $NF }'`
    local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
    if test "x$group_exists" != "x"; then
        local count=0
        while true; do
            eval $PSEUDO groupmod $opts
            if test $? != 0; then
                bbwarn "groupmod command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                bbfatal "Tried running groupmod command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "group $groupname doesn't exist, unable to modify it"
    fi
    set -e
}

perform_usermod () {
    # For the same reason as groupmod, temporarily disable the -e option
    set +e
    local rootdir="$1"
    local opts="$2"
    local retries="$3"
    bbnote "Performing usermod with [$opts], retrying up to $retries times"
    local username=`echo "$opts" | awk '{ print $NF }'`
    local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
    if test "x$user_exists" != "x"; then
        local count=0
        while true; do
            eval $PSEUDO usermod $opts
            if test $? != 0; then
                bbwarn "usermod command did not succeed. Retrying..."
            else
                break
            fi
            count=`expr $count + 1`
            if test $count = $retries; then
                bbfatal "Tried running usermod command $retries times without success, giving up"
            fi
            sleep $count
        done
    else
        bbwarn "user $username doesn't exist, unable to modify it"
    fi
    set -e
}
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
new file mode 100644
index 0000000000..1792f18e8c
--- /dev/null
+++ b/meta/classes/utility-tasks.bbclass
@@ -0,0 +1,69 @@
addtask listtasks
do_listtasks[nostamp] = "1"
python do_listtasks() {
    taskdescs = {}
    maxlen = 0
    for e in d.keys():
        if d.getVarFlag(e, 'task'):
            maxlen = max(maxlen, len(e))
            if e.endswith('_setscene'):
                desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
            else:
                desc = d.getVarFlag(e, 'doc') or ''
            taskdescs[e] = desc

    tasks = sorted(taskdescs.keys())
    for taskname in tasks:
        bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
}
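
# To list the tasks defined for a recipe from the command line:
#
#   bitbake <recipe> -c listtasks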

CLEANFUNCS ?= ""

T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
addtask clean
do_clean[nostamp] = "1"
python do_clean() {
    """clear the build and temp directories"""
    dir = d.expand("${WORKDIR}")
    bb.note("Removing " + dir)
    oe.path.remove(dir)

    dir = "%s.*" % bb.data.expand(d.getVar('STAMP'), d)
    bb.note("Removing " + dir)
    oe.path.remove(dir)

    for f in (d.getVar('CLEANFUNCS', True) or '').split():
        bb.build.exec_func(f, d)
}

addtask checkuri
do_checkuri[nostamp] = "1"
python do_checkuri() {
    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    localdata = bb.data.createCopy(d)
    bb.data.update_data(localdata)

    try:
        fetcher = bb.fetch2.Fetch(src_uri, localdata)
        fetcher.checkstatus()
    except bb.fetch2.BBFetchException, e:
        raise bb.build.FuncFailed(e)
}

addtask checkuriall after do_checkuri
do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri"
do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}"
do_checkuriall[nostamp] = "1"
do_checkuriall() {
    :
}

addtask fetchall after do_fetch
do_fetchall[recrdeptask] = "do_fetchall do_fetch"
do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}"
do_fetchall() {
    :
}
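
# Example invocations, pre-fetching or URL-checking everything a target
# needs (the image name is illustrative):
#
#   bitbake core-image-minimal -c fetchall
#   bitbake core-image-minimal -c checkuriall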
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
new file mode 100644
index 0000000000..0a533afb1f
--- /dev/null
+++ b/meta/classes/utils.bbclass
@@ -0,0 +1,368 @@
# For compatibility
def base_path_join(a, *p):
    return oe.path.join(a, *p)

def base_path_relative(src, dest):
    return oe.path.relative(src, dest)

def base_path_out(path, d):
    return oe.path.format_display(path, d)

def base_read_file(filename):
    return oe.utils.read_file(filename)

def base_ifelse(condition, iftrue = True, iffalse = False):
    return oe.utils.ifelse(condition, iftrue, iffalse)

def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
    return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)

def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)

def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)

def base_contains(variable, checkvalues, truevalue, falsevalue, d):
    return oe.utils.contains(variable, checkvalues, truevalue, falsevalue, d)

def base_both_contain(variable1, variable2, checkvalue, d):
    return oe.utils.both_contain(variable1, variable2, checkvalue, d)

def base_prune_suffix(var, suffixes, d):
    return oe.utils.prune_suffix(var, suffixes, d)

def oe_filter(f, str, d):
    return oe.utils.str_filter(f, str, d)

def oe_filter_out(f, str, d):
    return oe.utils.str_filter_out(f, str, d)
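
# Typical inline use of one of these helpers in a recipe (values illustrative):
#
#   PACKAGECONFIG ??= "${@base_contains('DISTRO_FEATURES', 'x11', 'x11', '', d)}"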

def machine_paths(d):
    """List any existing machine specific filespath directories"""
    machine = d.getVar("MACHINE", True)
    filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
    for basepath in d.getVar("FILESPATHBASE", True).split(":"):
        for pkgpath in filespathpkg:
            machinepath = os.path.join(basepath, pkgpath, machine)
            if os.path.isdir(machinepath):
                yield machinepath

def is_machine_specific(d):
    """Determine whether the current recipe is machine specific"""
    machinepaths = set(machine_paths(d))
    srcuri = d.getVar("SRC_URI", True).split()
    fetcher = bb.fetch2.Fetch(srcuri, d)
    for url in srcuri:
        if url.startswith("file://"):
            if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
                return True

oe_soinstall() {
    # Purpose: Install shared library file and
    # create the necessary links
    # Example:
    #
    # oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
    #
    #bbnote installing shared library $1 to $2
    #
    libname=`basename $1`
    install -m 755 $1 $2/$libname
    sonamelink=`${HOST_PREFIX}readelf -d $1 | grep 'Library soname:' | sed -e 's/.*\[\(.*\)\].*/\1/'`
    solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
    ln -sf $libname $2/$sonamelink
    ln -sf $libname $2/$solink
}

oe_libinstall() {
    # Purpose: Install a library, in all its forms
    # Example
    #
    # oe_libinstall libltdl ${STAGING_LIBDIR}/
    # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
    dir=""
    libtool=""
    silent=""
    require_static=""
    require_shared=""
    staging_install=""
    while [ "$#" -gt 0 ]; do
        case "$1" in
        -C)
            shift
            dir="$1"
            ;;
        -s)
            silent=1
            ;;
        -a)
            require_static=1
            ;;
        -so)
            require_shared=1
            ;;
        -*)
            bbfatal "oe_libinstall: unknown option: $1"
            ;;
        *)
            break
            ;;
        esac
        shift
    done

    libname="$1"
    shift
    destpath="$1"
    if [ -z "$destpath" ]; then
        bbfatal "oe_libinstall: no destination path specified"
    fi
    if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
    then
        staging_install=1
    fi

    __runcmd () {
        if [ -z "$silent" ]; then
            echo >&2 "oe_libinstall: $*"
        fi
        $*
    }

    if [ -z "$dir" ]; then
        dir=`pwd`
    fi

    dotlai=$libname.lai

    # Sanity check that the libname.lai is unique
    number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
    if [ $number_of_files -gt 1 ]; then
        bbfatal "oe_libinstall: $dotlai is not unique in $dir"
    fi

    dir=$dir`(cd $dir; find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
    olddir=`pwd`
    __runcmd cd $dir

    lafile=$libname.la

    # If the .la file doesn't exist, try again with the version suffix
    # stripped from the library name
    if [ ! -f "$lafile" ]; then
        libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
        lafile1=$libname1.la
        if [ -f "$lafile1" ]; then
            libname=$libname1
            lafile=$lafile1
        fi
    fi

    if [ -f "$lafile" ]; then
        # libtool archive
        eval `cat $lafile | grep "^library_names="`
        libtool=1
    else
        library_names="$libname.so* $libname.dll.a $libname.*.dylib"
    fi

    __runcmd install -d $destpath/
    dota=$libname.a
    if [ -f "$dota" -o -n "$require_static" ]; then
        rm -f $destpath/$dota
        __runcmd install -m 0644 $dota $destpath/
    fi
    if [ -f "$dotlai" -a -n "$libtool" ]; then
        rm -f $destpath/$libname.la
        __runcmd install -m 0644 $dotlai $destpath/$libname.la
    fi

    for name in $library_names; do
        files=`eval echo $name`
        for f in $files; do
            if [ ! -e "$f" ]; then
                if [ -n "$libtool" ]; then
                    bbfatal "oe_libinstall: $dir/$f not found."
                fi
            elif [ -L "$f" ]; then
                __runcmd cp -P "$f" $destpath/
            elif [ ! -L "$f" ]; then
                libfile="$f"
                rm -f $destpath/$libfile
                __runcmd install -m 0755 $libfile $destpath/
            fi
        done
    done

    if [ -z "$libfile" ]; then
        if [ -n "$require_shared" ]; then
            bbfatal "oe_libinstall: unable to locate shared library"
        fi
    elif [ -z "$libtool" ]; then
        # special case hack for non-libtool .so.#.#.# links
        baselibfile=`basename "$libfile"`
        if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
            sonamelink=`${HOST_PREFIX}readelf -d $libfile | grep 'Library soname:' | sed -e 's/.*\[\(.*\)\].*/\1/'`
            solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
            if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
                __runcmd ln -sf $baselibfile $destpath/$sonamelink
            fi
            __runcmd ln -sf $baselibfile $destpath/$solink
        fi
    fi

    __runcmd cd "$olddir"
}

oe_machinstall() {
    # Purpose: Install machine dependent files, if available
    # If not available, check if there is a default
    # If no default, just touch the destination
    # Example:
    #                $1  $2    $3         $4
    # oe_machinstall -m 0644 fstab ${D}/etc/fstab
    #
    # TODO: Check argument number?
    #
    filename=`basename $3`
    dirname=`dirname $3`

    for o in `echo ${OVERRIDES} | tr ':' ' '`; do
        if [ -e $dirname/$o/$filename ]; then
            bbnote "$dirname/$o/$filename present, installing to $4"
            install $1 $2 $dirname/$o/$filename $4
            return
        fi
    done
#    bbnote "overrides specific file NOT present, trying default=$3..."
    if [ -e $3 ]; then
        bbnote "$3 present, installing to $4"
        install $1 $2 $3 $4
    else
        bbnote "$3 NOT present, touching empty $4"
        touch $4
    fi
}

create_cmdline_wrapper () {
    # Create a wrapper script where commandline options are needed
    #
    # These are useful to work around relocation issues, by passing extra options
    # to a program
    #
    # Usage: create_cmdline_wrapper FILENAME <extra-options>

    cmd=$1
    shift

    echo "Generating wrapper script for $cmd"

    mv $cmd $cmd.real
    cmdname=`basename $cmd`.real
    cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
exec -a $cmd \`dirname \$realpath\`/$cmdname $@ "\$@"
END
    chmod +x $cmd
}
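
# Example (hypothetical program and option):
#
#   create_cmdline_wrapper ${D}${bindir}/foo --config=${sysconfdir}/foo.conf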

create_wrapper () {
    # Create a wrapper script where extra environment variables are needed
    #
    # These are useful to work around relocation issues, by setting environment
    # variables which point to paths in the filesystem.
    #
    # Usage: create_wrapper FILENAME [[VAR=VALUE]..]

    cmd=$1
    shift

    echo "Generating wrapper script for $cmd"

    mv $cmd $cmd.real
    cmdname=`basename $cmd`
    cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
export $@
exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
END
    chmod +x $cmd
}
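
# Example (hypothetical program and variable):
#
#   create_wrapper ${D}${bindir}/foo FOO_DATA_DIR=${datadir}/foo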

def check_app_exists(app, d):
    app = d.expand(app)
    path = d.getVar('PATH', True)
    return bool(bb.utils.which(path, app))

def explode_deps(s):
    return bb.utils.explode_deps(s)

def base_set_filespath(path, d):
    filespath = []
    extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
    # Don't prepend empty strings to the path list
    if extrapaths != "":
        path = extrapaths.split(":") + path
    # The ":" ensures we have an 'empty' override
    overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":")
    overrides.reverse()
    for o in overrides:
        for p in path:
            if p != "":
                filespath.append(os.path.join(p, o))
    return ":".join(filespath)
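
# Worked example (illustrative values): with FILESOVERRIDES = "qemuarm:armv5te"
# and path = ["${THISDIR}/files"], the overrides are reversed (so the
# highest-priority override comes first) and crossed with each path:
#
#   ${THISDIR}/files/armv5te:${THISDIR}/files/qemuarm:${THISDIR}/files/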

def extend_variants(d, var, extend, delim=':'):
    """Return a string of all bb class extend variants for the given extend"""
    variants = []
    whole = d.getVar(var, True) or ""
    for ext in whole.split():
        eext = ext.split(delim)
        if len(eext) > 1 and eext[0] == extend:
            variants.append(eext[1])
    return " ".join(variants)

def multilib_pkg_extend(d, pkg):
    variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split()
    if not variants:
        return pkg
    pkgs = pkg
    for v in variants:
        pkgs = pkgs + " " + v + "-" + pkg
    return pkgs
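
# For example (illustrative): with MULTILIB_VARIANTS = "lib32",
# multilib_pkg_extend(d, "bash") returns "bash lib32-bash".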

def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
    """Return a string of all ${var} values across all multilib tune configurations"""
    values = []
    value = d.getVar(var, True) or ""
    if value != "":
        if need_split:
            for item in value.split(delim):
                values.append(item)
        else:
            values.append(value)
    variants = d.getVar("MULTILIB_VARIANTS", True) or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)
        bb.data.update_data(localdata)
        value = localdata.getVar(var, True) or ""
        if value != "":
            if need_split:
                for item in value.split(delim):
                    values.append(item)
            else:
                values.append(value)
    if unique:
        # deduplicate while preserving the original order as much as possible
        ret = []
        for value in values:
            if not value in ret:
                ret.append(value)
    else:
        ret = values
    return " ".join(ret)
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
new file mode 100644
index 0000000000..0b7803b251
--- /dev/null
+++ b/meta/classes/vala.bbclass
@@ -0,0 +1,21 @@
# Vala has problems with multiple concurrent invocations
PARALLEL_MAKE = ""

# Everyone needs vala-native and targets need vala, too,
# because that is where target builds look for .vapi files.
#
VALADEPENDS = ""
VALADEPENDS_class-target = "vala"
DEPENDS_append = " vala-native ${VALADEPENDS}"

# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
export STAGING_DATADIR
# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
export XDG_DATA_DIRS = "${STAGING_DATADIR}"

# Package additional files
FILES_${PN}-dev += "\
    ${datadir}/vala/vapi/*.vapi \
    ${datadir}/vala/vapi/*.deps \
    ${datadir}/gir-1.0 \
"
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
new file mode 100644
index 0000000000..3a221e7082
--- /dev/null
+++ b/meta/classes/waf.bbclass
@@ -0,0 +1,13 @@
waf_do_configure() {
    ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
}

waf_do_compile() {
    ${S}/waf build ${PARALLEL_MAKE}
}

waf_do_install() {
    ${S}/waf install --destdir=${D}
}

EXPORT_FUNCTIONS do_configure do_compile do_install
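
# A minimal recipe using this class might look like the following sketch
# (the configure option is illustrative, not part of this class):
#
#   inherit waf
#   EXTRA_OECONF = "--disable-docs"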