author    Tudor Florea <tudor.florea@enea.com>  2015-10-09 22:59:03 +0200
committer Tudor Florea <tudor.florea@enea.com>  2015-10-09 22:59:03 +0200
commit    972dcfcdbfe75dcfeb777150c136576cf1a71e99 (patch)
tree      97a61cd7e293d7ae9d56ef7ed0f81253365bb026 /meta/classes
download  poky-972dcfcdbfe75dcfeb777150c136576cf1a71e99.tar.gz
initial commit for Enea Linux 5.0 arm
Signed-off-by: Tudor Florea <tudor.florea@enea.com>
Diffstat (limited to 'meta/classes')
-rw-r--r-- meta/classes/allarch.bbclass | 43
-rw-r--r-- meta/classes/archiver.bbclass | 380
-rw-r--r-- meta/classes/autotools-brokensep.bbclass | 5
-rw-r--r-- meta/classes/autotools.bbclass | 302
-rw-r--r-- meta/classes/autotools_stage.bbclass | 2
-rw-r--r-- meta/classes/base.bbclass | 566
-rw-r--r-- meta/classes/bin_package.bbclass | 36
-rw-r--r-- meta/classes/binconfig-disabled.bbclass | 15
-rw-r--r-- meta/classes/binconfig.bbclass | 63
-rw-r--r-- meta/classes/blacklist.bbclass | 45
-rw-r--r-- meta/classes/boot-directdisk.bbclass | 191
-rw-r--r-- meta/classes/bootimg.bbclass | 267
-rw-r--r-- meta/classes/bugzilla.bbclass | 187
-rw-r--r-- meta/classes/buildhistory.bbclass | 696
-rw-r--r-- meta/classes/buildstats-summary.bbclass | 39
-rw-r--r-- meta/classes/buildstats.bbclass | 289
-rw-r--r-- meta/classes/ccache.bbclass | 8
-rw-r--r-- meta/classes/chrpath.bbclass | 115
-rw-r--r-- meta/classes/clutter.bbclass | 22
-rw-r--r-- meta/classes/cmake.bbclass | 121
-rw-r--r-- meta/classes/cml1.bbclass | 74
-rw-r--r-- meta/classes/compress_doc.bbclass | 256
-rw-r--r-- meta/classes/copyleft_compliance.bbclass | 64
-rw-r--r-- meta/classes/copyleft_filter.bbclass | 62
-rw-r--r-- meta/classes/core-image.bbclass | 80
-rw-r--r-- meta/classes/cpan-base.bbclass | 55
-rw-r--r-- meta/classes/cpan.bbclass | 55
-rw-r--r-- meta/classes/cpan_build.bbclass | 53
-rw-r--r-- meta/classes/cross-canadian.bbclass | 142
-rw-r--r-- meta/classes/cross.bbclass | 75
-rw-r--r-- meta/classes/crosssdk.bbclass | 36
-rw-r--r-- meta/classes/debian.bbclass | 141
-rw-r--r-- meta/classes/deploy.bbclass | 10
-rw-r--r-- meta/classes/devshell.bbclass | 154
-rw-r--r-- meta/classes/distro_features_check.bbclass | 28
-rw-r--r-- meta/classes/distrodata.bbclass | 902
-rw-r--r-- meta/classes/distutils-base.bbclass | 4
-rw-r--r-- meta/classes/distutils-common-base.bbclass | 24
-rw-r--r-- meta/classes/distutils-native-base.bbclass | 3
-rw-r--r-- meta/classes/distutils-tools.bbclass | 77
-rw-r--r-- meta/classes/distutils.bbclass | 80
-rw-r--r-- meta/classes/distutils3-base.bbclass | 8
-rw-r--r-- meta/classes/distutils3-native-base.bbclass | 4
-rw-r--r-- meta/classes/distutils3.bbclass | 96
-rw-r--r-- meta/classes/externalsrc.bbclass | 53
-rw-r--r-- meta/classes/extrausers.bbclass | 65
-rw-r--r-- meta/classes/fontcache.bbclass | 45
-rw-r--r-- meta/classes/gconf.bbclass | 70
-rw-r--r-- meta/classes/gettext.bbclass | 19
-rw-r--r-- meta/classes/gnome.bbclass | 5
-rw-r--r-- meta/classes/gnomebase.bbclass | 30
-rw-r--r-- meta/classes/grub-efi.bbclass | 141
-rw-r--r-- meta/classes/gsettings.bbclass | 37
-rw-r--r-- meta/classes/gtk-doc.bbclass | 25
-rw-r--r-- meta/classes/gtk-icon-cache.bbclass | 62
-rw-r--r-- meta/classes/gtk-immodules-cache.bbclass | 83
-rw-r--r-- meta/classes/gummiboot.bbclass | 114
-rw-r--r-- meta/classes/gzipnative.bbclass | 5
-rw-r--r-- meta/classes/icecc.bbclass | 332
-rw-r--r-- meta/classes/image-live.bbclass | 18
-rw-r--r-- meta/classes/image-mklibs.bbclass | 71
-rw-r--r-- meta/classes/image-prelink.bbclass | 33
-rw-r--r-- meta/classes/image-swab.bbclass | 94
-rw-r--r-- meta/classes/image-vmdk.bbclass | 35
-rw-r--r-- meta/classes/image.bbclass | 448
-rw-r--r-- meta/classes/image_types.bbclass | 163
-rw-r--r-- meta/classes/image_types_uboot.bbclass | 23
-rw-r--r-- meta/classes/insane.bbclass | 1153
-rw-r--r-- meta/classes/insserv.bbclass | 5
-rw-r--r-- meta/classes/kernel-arch.bbclass | 60
-rw-r--r-- meta/classes/kernel-grub.bbclass | 91
-rw-r--r-- meta/classes/kernel-module-split.bbclass | 200
-rw-r--r-- meta/classes/kernel-yocto.bbclass | 361
-rw-r--r-- meta/classes/kernel.bbclass | 505
-rw-r--r-- meta/classes/lib_package.bbclass | 7
-rw-r--r-- meta/classes/libc-common.bbclass | 43
-rw-r--r-- meta/classes/libc-package.bbclass | 390
-rw-r--r-- meta/classes/license.bbclass | 397
-rw-r--r-- meta/classes/linux-kernel-base.bbclass | 32
-rw-r--r-- meta/classes/logging.bbclass | 72
-rw-r--r-- meta/classes/meta.bbclass | 4
-rw-r--r-- meta/classes/metadata_scm.bbclass | 82
-rw-r--r-- meta/classes/migrate_localcount.bbclass | 46
-rw-r--r-- meta/classes/mime.bbclass | 56
-rw-r--r-- meta/classes/mirrors.bbclass | 82
-rw-r--r-- meta/classes/module-base.bbclass | 18
-rw-r--r-- meta/classes/module.bbclass | 32
-rw-r--r-- meta/classes/multilib.bbclass | 145
-rw-r--r-- meta/classes/multilib_global.bbclass | 158
-rw-r--r-- meta/classes/multilib_header.bbclass | 54
-rw-r--r-- meta/classes/native.bbclass | 175
-rw-r--r-- meta/classes/nativesdk.bbclass | 95
-rw-r--r-- meta/classes/oelint.bbclass | 85
-rw-r--r-- meta/classes/own-mirrors.bbclass | 13
-rw-r--r-- meta/classes/package.bbclass | 2060
-rw-r--r-- meta/classes/package_deb.bbclass | 330
-rw-r--r-- meta/classes/package_ipk.bbclass | 286
-rw-r--r-- meta/classes/package_rpm.bbclass | 754
-rw-r--r-- meta/classes/package_tar.bbclass | 69
-rw-r--r-- meta/classes/packagedata.bbclass | 26
-rw-r--r-- meta/classes/packagegroup.bbclass | 52
-rw-r--r-- meta/classes/packageinfo.bbclass | 22
-rw-r--r-- meta/classes/patch.bbclass | 188
-rw-r--r-- meta/classes/perlnative.bbclass | 3
-rw-r--r-- meta/classes/pixbufcache.bbclass | 72
-rw-r--r-- meta/classes/pkgconfig.bbclass | 2
-rw-r--r-- meta/classes/populate_sdk.bbclass | 7
-rw-r--r-- meta/classes/populate_sdk_base.bbclass | 164
-rw-r--r-- meta/classes/prexport.bbclass | 58
-rw-r--r-- meta/classes/primport.bbclass | 21
-rw-r--r-- meta/classes/prserv.bbclass | 2
-rw-r--r-- meta/classes/ptest-gnome.bbclass | 8
-rw-r--r-- meta/classes/ptest.bbclass | 62
-rw-r--r-- meta/classes/python-dir.bbclass | 5
-rw-r--r-- meta/classes/python3native.bbclass | 7
-rw-r--r-- meta/classes/pythonnative.bbclass | 6
-rw-r--r-- meta/classes/qemu.bbclass | 48
-rw-r--r-- meta/classes/qmake2.bbclass | 27
-rw-r--r-- meta/classes/qmake_base.bbclass | 119
-rw-r--r-- meta/classes/qt4e.bbclass | 24
-rw-r--r-- meta/classes/qt4x11.bbclass | 14
-rw-r--r-- meta/classes/recipe_sanity.bbclass | 167
-rw-r--r-- meta/classes/relocatable.bbclass | 7
-rw-r--r-- meta/classes/report-error.bbclass | 70
-rw-r--r-- meta/classes/rm_work.bbclass | 120
-rw-r--r-- meta/classes/rootfs_deb.bbclass | 39
-rw-r--r-- meta/classes/rootfs_ipk.bbclass | 39
-rw-r--r-- meta/classes/rootfs_rpm.bbclass | 47
-rw-r--r-- meta/classes/sanity.bbclass | 887
-rw-r--r-- meta/classes/scons.bbclass | 15
-rw-r--r-- meta/classes/sdl.bbclass | 6
-rw-r--r-- meta/classes/setuptools.bbclass | 8
-rw-r--r-- meta/classes/setuptools3.bbclass | 8
-rw-r--r-- meta/classes/sip.bbclass | 61
-rw-r--r-- meta/classes/siteconfig.bbclass | 33
-rw-r--r-- meta/classes/siteinfo.bbclass | 164
-rw-r--r-- meta/classes/spdx.bbclass | 325
-rw-r--r-- meta/classes/sstate.bbclass | 837
-rw-r--r-- meta/classes/staging.bbclass | 122
-rw-r--r-- meta/classes/syslinux.bbclass | 187
-rw-r--r-- meta/classes/systemd.bbclass | 197
-rw-r--r-- meta/classes/terminal.bbclass | 94
-rw-r--r-- meta/classes/testimage-auto.bbclass | 23
-rw-r--r-- meta/classes/testimage.bbclass | 323
-rw-r--r-- meta/classes/texinfo.bbclass | 15
-rw-r--r-- meta/classes/tinderclient.bbclass | 368
-rw-r--r-- meta/classes/toaster.bbclass | 343
-rw-r--r-- meta/classes/toolchain-scripts.bbclass | 138
-rw-r--r-- meta/classes/typecheck.bbclass | 12
-rw-r--r-- meta/classes/uboot-config.bbclass | 61
-rw-r--r-- meta/classes/uninative.bbclass | 44
-rw-r--r-- meta/classes/update-alternatives.bbclass | 267
-rw-r--r-- meta/classes/update-rc.d.bbclass | 135
-rw-r--r-- meta/classes/useradd-staticids.bbclass | 276
-rw-r--r-- meta/classes/useradd.bbclass | 213
-rw-r--r-- meta/classes/useradd_base.bbclass | 230
-rw-r--r-- meta/classes/utility-tasks.bbclass | 69
-rw-r--r-- meta/classes/utils.bbclass | 379
-rw-r--r-- meta/classes/vala.bbclass | 21
-rw-r--r-- meta/classes/waf.bbclass | 13
160 files changed, 23006 insertions, 0 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
new file mode 100644
index 0000000000..4bc99272c4
--- /dev/null
+++ b/meta/classes/allarch.bbclass
@@ -0,0 +1,43 @@
#
# This class is used for architecture independent recipes/data files (usually scripts)
#

# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
# point elsewhere after these changes.
STAGING_DIR_HOST := "${STAGING_DIR_HOST}"

PACKAGE_ARCH = "all"

python () {
    # Allow this class to be included but overridden - only set
    # the values if we're still "all" package arch.
    if d.getVar("PACKAGE_ARCH") == "all":
        # No need for virtual/libc or a cross compiler
        d.setVar("INHIBIT_DEFAULT_DEPS","1")

        # Set these to a common set of values, we shouldn't be using them other than for WORKDIR directory
        # naming anyway
        d.setVar("TARGET_ARCH", "allarch")
        d.setVar("TARGET_OS", "linux")
        d.setVar("TARGET_CC_ARCH", "none")
        d.setVar("TARGET_LD_ARCH", "none")
        d.setVar("TARGET_AS_ARCH", "none")
        d.setVar("TARGET_FPU", "")
        d.setVar("TARGET_PREFIX", "")
        d.setVar("PACKAGE_EXTRA_ARCHS", "")
        d.setVar("SDK_ARCH", "none")
        d.setVar("SDK_CC_ARCH", "none")

        # Avoid this being unnecessarily different due to nuances of
        # the target machine that aren't important for "all" arch
        # packages.
        d.setVar("LDFLAGS", "")

        # No need to do shared library processing or debug symbol handling
        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
        d.setVar("INHIBIT_PACKAGE_STRIP", "1")
    elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
        bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE", True))
}

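As a usage illustration (a hypothetical recipe, not part of this commit), a recipe that only ships architecture-independent scripts would simply inherit the class:

# hypothetical recipe: hello-script_1.0.bb
SUMMARY = "Architecture-independent helper script"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
SRC_URI = "file://hello.sh"

inherit allarch

do_install() {
    install -d ${D}${bindir}
    install -m 0755 ${WORKDIR}/hello.sh ${D}${bindir}
}
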
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
new file mode 100644
index 0000000000..939624ae1d
--- /dev/null
+++ b/meta/classes/archiver.bbclass
@@ -0,0 +1,380 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# This bbclass is used for creating archives of:
# 1) the original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) the patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) the configured source: ARCHIVER_MODE[src] = "configured"
# 4) the patches between do_unpack and do_patch:
#    ARCHIVER_MODE[diff] = "1"
#    You can set what you'd like to exclude from the diff:
#    ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
# 5) the environment data, similar to 'bitbake -e recipe':
#    ARCHIVER_MODE[dumpdata] = "1"
# 6) the recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
# 7) whether to output the .src.rpm package:
#    ARCHIVER_MODE[srpm] = "1"
# 8) filtering by license: recipes whose license matches
#    COPYLEFT_LICENSE_INCLUDE will be included, and those matching
#    COPYLEFT_LICENSE_EXCLUDE will be excluded.
#    COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
#    COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
# 9) the recipe types that will be archived:
#    COPYLEFT_RECIPE_TYPES = 'target'
#

# Don't filter the license by default
COPYLEFT_LICENSE_INCLUDE ?= ''
COPYLEFT_LICENSE_EXCLUDE ?= ''
# Create archives for all the recipe types
COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
inherit copyleft_filter

ARCHIVER_MODE[srpm] ?= "0"
ARCHIVER_MODE[src] ?= "patched"
ARCHIVER_MODE[diff] ?= "0"
ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"

DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"

do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"

# This is a convenience for the shell scripts to use


python () {
    pn = d.getVar('PN', True)

    if d.getVar('COPYLEFT_LICENSE_INCLUDE', True) or \
       d.getVar('COPYLEFT_LICENSE_EXCLUDE', True):
        included, reason = copyleft_should_include(d)
        if not included:
            bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
            return
        else:
            bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))

    ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
    ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
    ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)

    if ar_src == "original":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
    elif ar_src == "patched":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
    elif ar_src == "configured":
        # We can't use "addtask do_ar_configured after do_configure" since it
        # will cause the deptask of do_populate_sysroot to run no matter what
        # archives we need, so we add the depends here.
        d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
    elif ar_src:
        bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)

    if ar_dumpdata == "1":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn)

    if ar_recipe == "1":
        d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)

    # Output the srpm package
    ar_srpm = d.getVarFlag('ARCHIVER_MODE', 'srpm', True)
    if ar_srpm == "1":
        if d.getVar('PACKAGES', True) != '' and d.getVar('IMAGE_PKGTYPE', True) == 'rpm':
            d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
            if ar_dumpdata == "1":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
            if ar_recipe == "1":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn)
            if ar_src == "original":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn)
            elif ar_src == "patched":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
            elif ar_src == "configured":
                d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)

    # The gcc stuff uses shared source
    flag = d.getVarFlag("do_unpack", "stamp-base", True)
    if flag:
        if ar_src in [ 'original', 'patched' ]:
            ar_outdir = os.path.join(d.getVar('ARCHIVER_TOPDIR', True), 'work-shared')
            d.setVar('ARCHIVER_OUTDIR', ar_outdir)
        d.setVarFlag('do_ar_original', 'stamp-base', flag)
        d.setVarFlag('do_ar_patched', 'stamp-base', flag)
        d.setVarFlag('do_unpack_and_patch', 'stamp-base', flag)
        d.setVarFlag('do_ar_original', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
        d.setVarFlag('do_unpack_and_patch', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
        d.setVarFlag('do_ar_patched', 'vardepsexclude', 'PN PF ARCHIVER_OUTDIR WORKDIR')
        d.setVarFlag('create_diff_gz', 'vardepsexclude', 'PF')
        d.setVarFlag('create_tarball', 'vardepsexclude', 'PF')

        flag_clean = d.getVarFlag('do_unpack', 'stamp-base-clean', True)
        if flag_clean:
            d.setVarFlag('do_ar_original', 'stamp-base-clean', flag_clean)
            d.setVarFlag('do_ar_patched', 'stamp-base-clean', flag_clean)
            d.setVarFlag('do_unpack_and_patch', 'stamp-base-clean', flag_clean)
}

# Takes all the sources for a recipe and puts them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {

    import shutil, tarfile, tempfile

    if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original":
        return

    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
    bb.note('Archiving the original source...')
    fetch = bb.fetch2.Fetch([], d)
    for url in fetch.urls:
        local = fetch.localpath(url).rstrip("/")
        if os.path.isfile(local):
            shutil.copy(local, ar_outdir)
        elif os.path.isdir(local):
            basename = os.path.basename(local)

            tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
            fetch.unpack(tmpdir, (url,))

            os.chdir(tmpdir)
            # We split on '+' to chuck any annoying AUTOINC+ in the revision.
            try:
                src_rev = bb.fetch2.get_srcrev(d).split('+')[-1][:10]
            except:
                src_rev = 'NOREV'
            tarname = os.path.join(ar_outdir, basename + '.' + src_rev + '.tar.gz')
            tar = tarfile.open(tarname, 'w:gz')
            tar.add('.')
            tar.close()

    # Emit patch series files for 'original'
    bb.note('Writing patch series files...')
    for patch in src_patches(d):
        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
        patchdir = parm.get('patchdir')
        if patchdir:
            series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_'))
        else:
            series = os.path.join(ar_outdir, 'series')

        with open(series, 'a') as s:
            s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
}

python do_ar_patched() {

    if d.getVarFlag('ARCHIVER_MODE', 'src', True) != 'patched':
        return

    # Get the ARCHIVER_OUTDIR before we reset the WORKDIR
    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
    bb.note('Archiving the patched source...')
    d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
    # The gcc stuff uses shared source
    flag = d.getVarFlag('do_unpack', 'stamp-base', True)
    if flag:
        create_tarball(d, d.getVar('S', True), 'patched', ar_outdir, 'gcc')
    else:
        create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
}

python do_ar_configured() {
    import shutil

    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
    if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
        bb.note('Archiving the configured source...')
        # libtool-native's do_configure will remove the
        # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
        # do_configure; instead, we archive the already configured ${S}.
        if d.getVar('PN', True) != 'libtool-native':
            # Change the WORKDIR to make do_configure run in another dir.
            d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
            if bb.data.inherits_class('kernel-yocto', d):
                bb.build.exec_func('do_kernel_configme', d)
            if bb.data.inherits_class('cmake', d):
                bb.build.exec_func('do_generate_toolchain_file', d)
            prefuncs = d.getVarFlag('do_configure', 'prefuncs', True)
            for func in (prefuncs or '').split():
                if func != "sysroot_cleansstate":
                    bb.build.exec_func(func, d)
            bb.build.exec_func('do_configure', d)
            postfuncs = d.getVarFlag('do_configure', 'postfuncs', True)
            for func in (postfuncs or '').split():
                if func != "do_qa_configure":
                    bb.build.exec_func(func, d)
        srcdir = d.getVar('S', True)
        builddir = d.getVar('B', True)
        if srcdir != builddir:
            if os.path.exists(builddir):
                oe.path.copytree(builddir, os.path.join(srcdir, \
                    'build.%s.ar_configured' % d.getVar('PF', True)))
        create_tarball(d, srcdir, 'configured', ar_outdir)
}

def create_tarball(d, srcdir, suffix, ar_outdir, pf=None):
    """
    create the tarball from srcdir
    """
    import tarfile

    bb.utils.mkdirhier(ar_outdir)
    if pf:
        tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % (pf, suffix))
    else:
        tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \
            (d.getVar('PF', True), suffix))

    srcdir = srcdir.rstrip('/')
    dirname = os.path.dirname(srcdir)
    basename = os.path.basename(srcdir)
    os.chdir(dirname)
    bb.note('Creating %s' % tarname)
    tar = tarfile.open(tarname, 'w:gz')
    tar.add(basename)
    tar.close()

# Create a .diff.gz between source.orig and source
def create_diff_gz(d, src_orig, src, ar_outdir):

    import subprocess

    if not os.path.isdir(src) or not os.path.isdir(src_orig):
        return

    # diff's --exclude can't exclude files by path, so we copy the
    # patched source and remove the files that we'd like to exclude.
    src_patched = src + '.patched'
    oe.path.copyhardlinktree(src, src_patched)
    for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude', True).split():
        bb.utils.remove(os.path.join(src_orig, i), recurse=True)
        bb.utils.remove(os.path.join(src_patched, i), recurse=True)

    dirname = os.path.dirname(src)
    basename = os.path.basename(src)
    os.chdir(dirname)
    out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF', True))
    diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
    subprocess.call(diff_cmd, shell=True)
    bb.utils.remove(src_patched, recurse=True)

# Run do_unpack and do_patch
python do_unpack_and_patch() {
    if d.getVarFlag('ARCHIVER_MODE', 'src', True) not in \
            [ 'patched', 'configured'] and \
            d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
        return

    ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)

    # Change the WORKDIR to make do_unpack and do_patch run in another dir.
    d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))

    # The changed 'WORKDIR' also causes 'B' to change; create dir 'B' since
    # some of the following tasks may require it (e.g. some recipes'
    # do_patch requires 'B' to exist).
    bb.utils.mkdirhier(d.getVar('B', True))

    # The kernel source is ready after do_validate_branches
    if bb.data.inherits_class('kernel-yocto', d):
        bb.build.exec_func('do_unpack', d)
        bb.build.exec_func('do_kernel_checkout', d)
        bb.build.exec_func('do_validate_branches', d)
    else:
        bb.build.exec_func('do_unpack', d)

    # Save the original source for creating the patches
    if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
        src = d.getVar('S', True).rstrip('/')
        src_orig = '%s.orig' % src
        oe.path.copytree(src, src_orig)
    bb.build.exec_func('do_patch', d)
    # Create the patches
    if d.getVarFlag('ARCHIVER_MODE', 'diff', True) == '1':
        bb.note('Creating diff gz...')
        create_diff_gz(d, src_orig, src, ar_outdir)
        bb.utils.remove(src_orig, recurse=True)
}

python do_ar_recipe () {
    """
    archive the recipe, including .bb and .inc.
    """
    import re
    import shutil

    require_re = re.compile( r"require\s+(.+)" )
    include_re = re.compile( r"include\s+(.+)" )
    bbfile = d.getVar('FILE', True)
    outdir = os.path.join(d.getVar('WORKDIR', True), \
        '%s-recipe' % d.getVar('PF', True))
    bb.utils.mkdirhier(outdir)
    shutil.copy(bbfile, outdir)

    dirname = os.path.dirname(bbfile)
    bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
    f = open(bbfile, 'r')
    for line in f.readlines():
        incfile = None
        if require_re.match(line):
            incfile = require_re.match(line).group(1)
        elif include_re.match(line):
            incfile = include_re.match(line).group(1)
        if incfile:
            incfile = bb.data.expand(incfile, d)
            incfile = bb.utils.which(bbpath, incfile)
            if incfile:
                shutil.copy(incfile, outdir)

    create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR', True))
    bb.utils.remove(outdir, recurse=True)
}

python do_dumpdata () {
    """
    dump environment data to ${PF}-showdata.dump
    """

    dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
        '%s-showdata.dump' % d.getVar('PF', True))
    bb.note('Dumping metadata into %s' % dumpfile)
    f = open(dumpfile, 'w')
    # emit variables and shell functions
    bb.data.emit_env(f, d, True)
    # emit the metadata which isn't valid shell
    for e in d.keys():
        if bb.data.getVarFlag(e, 'python', d):
            f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, True)))
    f.close()
}

SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
    echo "Deploying source archive files ..."
}
python do_deploy_archives_setscene () {
    sstate_setscene(d)
}
do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"

addtask do_ar_original after do_unpack
addtask do_unpack_and_patch after do_patch
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives before do_build

do_unpack_and_patch[depends] += "gcc-source:do_patch"
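
For illustration (a hypothetical configuration fragment, not part of this commit), enabling the archiver from a build's local.conf uses the knobs documented in the class header above:

# hypothetical local.conf fragment
INHERIT += "archiver"
ARCHIVER_MODE[src] = "original"      # archive the unpacked upstream source
ARCHIVER_MODE[diff] = "1"            # also emit a .diff.gz of the applied patches
ARCHIVER_MODE[recipe] = "1"          # include the .bb/.inc files
COPYLEFT_LICENSE_INCLUDE = "GPL* LGPL*"

The resulting archives land under ${DEPLOY_DIR}/sources via the do_deploy_archives sstate task.
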
diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass
new file mode 100644
index 0000000000..71cf97a391
--- /dev/null
+++ b/meta/classes/autotools-brokensep.bbclass
@@ -0,0 +1,5 @@
# Autotools class for recipes where a separate build dir doesn't work.
# Ideally we should fix the software so it does work; standard autotools
# supports this.
inherit autotools
B = "${S}"
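
A hypothetical recipe for software that only builds in-tree would inherit this variant instead of plain autotools:

# hypothetical recipe fragment
inherit autotools-brokensep
# B = "${S}" is set by the class, so configure and compile run inside the source tree
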
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
new file mode 100644
index 0000000000..b5f45160ed
--- /dev/null
+++ b/meta/classes/autotools.bbclass
@@ -0,0 +1,302 @@
def autotools_dep_prepend(d):
    if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
        return ''

    pn = d.getVar('PN', True)
    deps = ''

    if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
        return deps
    deps += 'autoconf-native automake-native '

    if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
        deps += 'libtool-native '
        if not bb.data.inherits_class('native', d) \
                and not bb.data.inherits_class('nativesdk', d) \
                and not bb.data.inherits_class('cross', d) \
                and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
            deps += 'libtool-cross '

    return deps + 'gnu-config-native '

EXTRA_OEMAKE = ""

DEPENDS_prepend = "${@autotools_dep_prepend(d)}"

inherit siteinfo

# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
export CONFIG_SITE = "${@siteinfo_get_files(d)}"

acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"

export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"

# When building tools for use at build-time it's recommended for the build
# system to use these variables when cross-compiling.
# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
export CPP_FOR_BUILD = "${BUILD_CPP}"
export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"

export CC_FOR_BUILD = "${BUILD_CC}"
export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"

export CXX_FOR_BUILD = "${BUILD_CXX}"
export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"

export LD_FOR_BUILD = "${BUILD_LD}"
export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"

def append_libtool_sysroot(d):
    # Only supply libtool sysroot option for non-native packages
    if not bb.data.inherits_class('native', d):
        return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
    return ""

CONFIGUREOPTS = " --build=${BUILD_SYS} \
                  --host=${HOST_SYS} \
                  --target=${TARGET_SYS} \
                  --prefix=${prefix} \
                  --exec_prefix=${exec_prefix} \
                  --bindir=${bindir} \
                  --sbindir=${sbindir} \
                  --libexecdir=${libexecdir} \
                  --datadir=${datadir} \
                  --sysconfdir=${sysconfdir} \
                  --sharedstatedir=${sharedstatedir} \
                  --localstatedir=${localstatedir} \
                  --libdir=${libdir} \
                  --includedir=${includedir} \
                  --oldincludedir=${oldincludedir} \
                  --infodir=${infodir} \
                  --mandir=${mandir} \
                  --disable-silent-rules \
                  ${CONFIGUREOPT_DEPTRACK} \
                  ${@append_libtool_sysroot(d)}"
CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking"


oe_runconf () {
    cfgscript="${S}/configure"
    if [ -x "$cfgscript" ] ; then
        bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
        set +e
        ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
        if [ "$?" != "0" ]; then
            echo "Configure failed. The contents of all config.log files follow to aid debugging"
            find ${S} -ignore_readdir_race -name config.log -print -exec cat {} \;
            bbfatal "oe_runconf failed"
        fi
        set -e
    else
        bbfatal "no configure script found at $cfgscript"
    fi
}

AUTOTOOLS_AUXDIR ?= "${S}"

CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"

autotools_preconfigure() {
    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
            if [ "${S}" != "${B}" ]; then
                echo "Previously configured separate build directory detected, cleaning ${B}"
                rm -rf ${B}
                mkdir ${B}
            else
                # At least remove the .la files since automake won't automatically
                # regenerate them even if CFLAGS/LDFLAGS are different
                cd ${S}; find ${S} -name \*.la -delete
            fi
        fi
    fi
}

autotools_postconfigure(){
    if [ -n "${CONFIGURESTAMPFILE}" ]; then
        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
    fi
}

EXTRACONFFUNCS ??= ""

do_configure[prefuncs] += "autotools_preconfigure autotools_copy_aclocals ${EXTRACONFFUNCS}"
do_configure[postfuncs] += "autotools_postconfigure"

ACLOCALDIR = "${B}/aclocal-copy"

python autotools_copy_aclocals () {
    s = d.getVar("S", True)
    if not os.path.exists(s + "/configure.in") and not os.path.exists(s + "/configure.ac"):
        if not d.getVar("AUTOTOOLS_COPYACLOCAL"):
            return

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    #bb.warn(str(taskdepdata))
    pn = d.getVar("PN", True)
    aclocaldir = d.getVar("ACLOCALDIR", True)
    oe.path.remove(aclocaldir)
    bb.utils.mkdirhier(aclocaldir)
    start = None
    configuredeps = []

    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == "do_configure" and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")

    # We need to find configure tasks which are either from <target> -> <target>
    # or <native> -> <native> but not <target> -> <native> unless they're direct
    # dependencies. This mirrors what would get restored from sstate.
    done = [dep]
    next = [dep]
    while next:
        new = []
        for dep in next:
            data = taskdepdata[dep]
            for datadep in data[3]:
                if datadep in done:
                    continue
                done.append(datadep)
                if (not data[0].endswith("-native")) and taskdepdata[datadep][0].endswith("-native") and dep != start:
                    continue
                new.append(datadep)
                if taskdepdata[datadep][1] == "do_configure":
                    configuredeps.append(taskdepdata[datadep][0])
        next = new

    #configuredeps2 = []
    #for dep in taskdepdata:
    #    data = taskdepdata[dep]
    #    if data[1] == "do_configure" and data[0] != pn:
    #        configuredeps2.append(data[0])
    #configuredeps.sort()
    #configuredeps2.sort()
    #bb.warn(str(configuredeps))
    #bb.warn(str(configuredeps2))

    cp = []
    for c in configuredeps:
        if c.endswith("-native"):
            manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
        elif c.startswith("nativesdk-"):
            manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${SDK_ARCH}_${SDK_OS}-%s.populate_sysroot" % c)
        elif "-cross-" in c or "-crosssdk" in c:
            continue
        else:
            manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${MACHINE}-%s.populate_sysroot" % c)
        try:
            f = open(manifest, "r")
            for l in f:
                if "/aclocal/" in l and l.strip().endswith(".m4"):
                    cp.append(l.strip())
        except:
            bb.warn("%s not found" % manifest)

    for c in cp:
        t = os.path.join(aclocaldir, os.path.basename(c))
        if not os.path.exists(t):
            os.symlink(c, t)
}
autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"

autotools_do_configure() {
    # WARNING: gross hack follows:
    # An autotools-built package generally needs these scripts, however only
    # automake or libtoolize actually install the current versions of them.
    # This is a problem in builds that do not use libtool or automake, in the
    # case where we -need- the latest version of these scripts, e.g. running
    # a build for a package whose autotools are old, on an x86_64 machine,
    # which the old config.sub does not support. Work around this by
    # installing them manually regardless.
    ( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
        rm -f `dirname $ac`/configure
    done )
    if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
        olddir=`pwd`
        cd ${S}
        ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
        if [ x"${acpaths}" = xdefault ]; then
            acpaths=
            for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
                grep -v 'acinclude.m4' | grep -v 'aclocal-copy' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
                acpaths="$acpaths -I $i"
            done
        else
            acpaths="${acpaths}"
        fi
        AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
        automake --version
        echo "AUTOV is $AUTOV"
        if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
            ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
        fi
        # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
        # like it was auto-generated. Work around this by blowing it away
        # by hand, unless the package specifically asked not to run aclocal.
        if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
            rm -f aclocal.m4
        fi
        if [ -e configure.in ]; then
            CONFIGURE_AC=configure.in
        else
            CONFIGURE_AC=configure.ac
        fi
        if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
            if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
                : do nothing -- we still have an old unmodified configure.ac
            else
                bbnote Executing glib-gettextize --force --copy
                echo "no" | glib-gettextize --force --copy
            fi
        else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
            # We'd call gettextize here if it wasn't so broken...
            cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
            if [ -d ${S}/po/ ]; then
                cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
                if [ ! -e ${S}/po/remove-potcdate.sin ]; then
                    cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
                fi
            fi
            for i in gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4; do
                for j in `find ${S} -name $i | grep -v aclocal-copy`; do
                    rm $j
                done
            done
        fi
        fi
        mkdir -p m4
        if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
            bbnote Executing intltoolize --copy --force --automake
            intltoolize --copy --force --automake
        fi
        bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
        ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed."
        cd $olddir
    fi
    if [ -e ${S}/configure ]; then
        oe_runconf
    else
        bbnote "nothing to configure"
    fi
}

autotools_do_install() {
    oe_runmake 'DESTDIR=${D}' install
    # Info dir listing isn't interesting at this point so remove it if it exists.
    if [ -e "${D}${infodir}/dir" ]; then
        rm -f ${D}${infodir}/dir
    fi
}

inherit siteconfig

EXPORT_FUNCTIONS do_configure do_install

B = "${WORKDIR}/build"
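
As an illustration (a hypothetical fragment, not part of this commit), a typical recipe built with this class only needs to inherit it and pass extra configure switches via EXTRA_OECONF, which oe_runconf appends after CONFIGUREOPTS:

# hypothetical recipe fragment
inherit autotools

# extra arguments handed to ./configure by oe_runconf
EXTRA_OECONF += "--disable-static --without-x"
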
diff --git a/meta/classes/autotools_stage.bbclass b/meta/classes/autotools_stage.bbclass
new file mode 100644
index 0000000000..b3c41e4b4d
--- /dev/null
+++ b/meta/classes/autotools_stage.bbclass
@@ -0,0 +1,2 @@
inherit autotools

diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
new file mode 100644
index 0000000000..ff8c63394f
--- /dev/null
+++ b/meta/classes/base.bbclass
@@ -0,0 +1,566 @@
BB_DEFAULT_TASK ?= "build"
CLASSOVERRIDE ?= "class-target"

inherit patch
inherit staging

inherit mirrors
inherit utils
inherit utility-tasks
inherit metadata_scm
inherit logging

OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath"
OE_IMPORTS[type] = "list"

def oe_import(d):
    import sys

    bbpath = d.getVar("BBPATH", True).split(":")
    sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]

    def inject(name, value):
        """Make a python object accessible from the metadata"""
        if hasattr(bb.utils, "_context"):
            bb.utils._context[name] = value
        else:
            __builtins__[name] = value

    import oe.data
    for toimport in oe.data.typed_value("OE_IMPORTS", d):
        imported = __import__(toimport)
        inject(toimport.split(".", 1)[0], imported)

    return ""

# We need the oe module name space early (before INHERITs get added)
OE_IMPORTED := "${@oe_import(d)}"

def lsb_distro_identifier(d):
    adjust = d.getVar('LSB_DISTRO_ADJUST', True)
    adjust_func = None
    if adjust:
        try:
            adjust_func = globals()[adjust]
        except KeyError:
            pass
    return oe.lsb.distro_identifier(adjust_func)

die() {
    bbfatal "$*"
}

oe_runmake_call() {
    bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
    ${MAKE} ${EXTRA_OEMAKE} "$@"
}

oe_runmake() {
    oe_runmake_call "$@" || die "oe_runmake failed"
}


def base_dep_prepend(d):
    #
    # Ideally this will check a flag so we will operate properly in
    # the case where host == build == target, for now we don't work in
    # that case though.
    #

    deps = ""
    # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
    # we need that built is the responsibility of the patch function / class, not
    # the application.
    if not d.getVar('INHIBIT_DEFAULT_DEPS'):
        if (d.getVar('HOST_SYS', True) != d.getVar('BUILD_SYS', True)):
            deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
    return deps

BASEDEPENDS = "${@base_dep_prepend(d)}"

DEPENDS_prepend="${BASEDEPENDS} "

FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location it's used (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"

def extra_path_elements(d):
    path = ""
    elements = (d.getVar('EXTRANATIVEPATH', True) or "").split()
    for e in elements:
        path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
    return path

PATH_prepend = "${@extra_path_elements(d)}"

addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {

    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.download()
    except bb.fetch2.BBFetchException as e:
        raise bb.build.FuncFailed(e)
}

addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
do_unpack[cleandirs] = "${S}/patches"
python base_do_unpack() {
    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    rootdir = d.getVar('WORKDIR', True)

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.unpack(rootdir)
    except bb.fetch2.BBFetchException as e:
        raise bb.build.FuncFailed(e)
}

def pkgarch_mapping(d):
    # Compatibility mappings of TUNE_PKGARCH (opt in)
    if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
        if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
            d.setVar("TUNE_PKGARCH", "armv7a")

def get_layers_branch_rev(d):
    layers = (d.getVar("BBLAYERS", True) or "").split()
    layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
        base_get_metadata_git_branch(i, None).strip(), \
        base_get_metadata_git_revision(i, None)) \
            for i in layers]
    i = len(layers_branch_rev)-1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2 = layers_branch_rev[i-1][p2:]
        if s1 == s2:
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1 = layers_branch_rev[i][p1:]
    return layers_branch_rev


BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
BUILDCFG_FUNCS[type] = "list"

def buildcfg_vars(d):
    statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
    for var in statusvars:
        value = d.getVar(var, True)
        if value is not None:
            yield '%-17s = "%s"' % (var, value)

def buildcfg_neededvars(d):
    needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
    pesteruser = []
    for v in needed_vars:
        val = d.getVar(v, True)
        if not val or val == 'INVALID':
            pesteruser.append(v)

    if pesteruser:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))

addhandler base_eventhandler
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise"
python base_eventhandler() {
    if isinstance(e, bb.event.ConfigParsed):
        e.data.setVar("NATIVELSBSTRING", lsb_distro_identifier(e.data))
        e.data.setVar('BB_VERSION', bb.__version__)
        pkgarch_mapping(e.data)
        oe.utils.features_backfill("DISTRO_FEATURES", e.data)
        oe.utils.features_backfill("MACHINE_FEATURES", e.data)

    if isinstance(e, bb.event.BuildStarted):
        localdata = bb.data.createCopy(e.data)
        bb.data.update_data(localdata)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = e.data.getVar('BUILDCFG_HEADER', True)
        bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))

    # This code is to silence warnings where the SDK variables overwrite the
    # target ones and we'd see duplicate key names overwriting each other
    # for various PREFERRED_PROVIDERS
    if isinstance(e, bb.event.RecipePreFinalise):
        if e.data.getVar("TARGET_PREFIX", True) == e.data.getVar("SDK_PREFIX", True):
            e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
            e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
            e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
            e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
            e.data.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")

}

addtask configure after do_patch
do_configure[dirs] = "${S} ${B}"
do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
    :
}

addtask compile after do_configure
do_compile[dirs] = "${S} ${B}"
base_do_compile() {
    if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
        oe_runmake || die "make failed"
    else
        bbnote "nothing to compile"
    fi
}

addtask install after do_compile
do_install[dirs] = "${D} ${S} ${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"

base_do_install() {
    :
}

base_do_package() {
    :
}

addtask build after do_populate_sysroot
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
    :
}

def set_packagetriplet(d):
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS", True).split())
    tos.append(d.getVar("TARGET_OS", True))
    tvs.append(d.getVar("TARGET_VENDOR", True))

    def settriplet(d, varname, archs, tos, tvs):
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS", True) or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)
        bb.data.update_data(localdata)

        archs.append(localdata.getVar("PACKAGE_ARCHS", True).split())
        tos.append(localdata.getVar("TARGET_OS", True))
        tvs.append(localdata.getVar("TARGET_VENDOR", True))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)

python () {
    import string, re

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    # PACKAGECONFIG ??= "<default options>"
    # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
        pn = d.getVar("PN", True)
        mlprefix = d.getVar("MLPREFIX", True)

        def expandFilter(appends, extension, prefix):
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or ("-cross-" in a):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        def appendVar(varname, appends):
            if not appends:
                return
            if varname.find("DEPENDS") != -1:
                if pn.startswith("nativesdk-"):
                    appends = expandFilter(appends, "", "nativesdk-")
                if pn.endswith("-native"):
                    appends = expandFilter(appends, "-native", "")
                if mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            if flag == "defaultval":
                continue
            items = flagval.split(",")
            num = len(items)
            if num > 4:
                bb.error("Only enable,disable,depend,rdepend can be specified!")

            if flag in pkgconfig:
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                extraconf.append(items[1])
        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS_${PN}', extrardeps)
        if bb.data.inherits_class('cmake', d):
            appendVar('EXTRA_OECMAKE', extraconf)
        else:
            appendVar('EXTRA_OECONF', extraconf)

    # If PRINC is set, try and increase the PR value by the amount specified.
    # The PR server is now the preferred way to handle PR changes based on
    # the checksum of the recipe (including bbappend). PRINC is now
    # obsolete, so emit a warning to the user.
    princ = d.getVar('PRINC', True)
    if princ and princ != "0":
        bb.warn("Use of PRINC %s was detected in the recipe %s (or one of its .bbappends)\nUse of PRINC is deprecated. The PR server should be used to automatically increment the PR. See: https://wiki.yoctoproject.org/wiki/PR_Service." % (princ, d.getVar("FILE", True)))
        pr = d.getVar('PR', True)
        pr_prefix = re.search("\D+", pr)
        prval = re.search("\d+", pr)
        if pr_prefix is None or prval is None:
            bb.error("Unable to analyse format of PR variable: %s" % pr)
        nval = int(prval.group(0)) + int(princ)
        pr = pr_prefix.group(0) + str(nval) + pr[prval.end():]
        d.setVar('PR', pr)

    pn = d.getVar('PN', True)
    license = d.getVar('LICENSE', True)
    if license == "INVALID":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        unmatched_license_flag = check_license_flags(d)
        if unmatched_license_flag:
            bb.debug(1, "Skipping %s because it has a restricted license not"
                " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
            raise bb.parse.SkipPackage("because it has a restricted license not"
                " whitelisted in LICENSE_FLAGS_WHITELIST")

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.setVarFlag('do_unpack', 'umask', '022')
        d.setVarFlag('do_configure', 'umask', '022')
        d.setVarFlag('do_compile', 'umask', '022')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', 1)
        d.setVarFlag('do_install', 'umask', '022')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', 1)
        d.setVarFlag('do_package', 'umask', '022')
        d.setVarFlag('do_package_setscene', 'fakeroot', 1)
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', 1)
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
    source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST', True)
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS', True)
            if not re.match(need_host, this_host):
                raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        need_machine = d.getVar('COMPATIBLE_MACHINE', True)
        if need_machine:
            import re
            compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
            for m in compat_machines:
                if re.match(need_machine, m):
                    break
            else:
                raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))


        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()

        check_license = False if pn.startswith("nativesdk-") else True
        for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
              "-crosssdk-${SDK_ARCH}", "-crosssdk-initial-${SDK_ARCH}",
              "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
            if pn.endswith(d.expand(t)):
                check_license = False

        if check_license and bad_licenses:
            bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)

            whitelist = []
            for lic in bad_licenses:
                for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]:
                    whitelist.extend((d.getVar(w + lic, True) or "").split())
                spdx_license = return_spdx(d, lic)
                if spdx_license:
                    whitelist.extend((d.getVar('HOSTTOOLS_WHITELIST_%s' % spdx_license, True) or "").split())
            if not pn in whitelist:
                recipe_license = d.getVar('LICENSE', True)
                pkgs = d.getVar('PACKAGES', True).split()
                skipped_pkgs = []
                unskipped_pkgs = []
                for pkg in pkgs:
                    if incompatible_license(d, bad_licenses, pkg):
                        skipped_pkgs.append(pkg)
                    else:
                        unskipped_pkgs.append(pkg)
                all_skipped = skipped_pkgs and not unskipped_pkgs
                if unskipped_pkgs:
                    for pkg in skipped_pkgs:
                        bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license)
                        d.setVar('LICENSE_EXCLUSION-' + pkg, 1)
                    for pkg in unskipped_pkgs:
                        bb.debug(1, "INCLUDING the package " + pkg)
                elif all_skipped or incompatible_license(d, bad_licenses):
                    bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, recipe_license))
                    raise bb.parse.SkipPackage("incompatible with license %s" % recipe_license)

    srcuri = d.getVar('SRC_URI', True)
    # Svn packages should DEPEND on subversion-native
    if "svn://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

    # Git packages should DEPEND on git-native
    if "git://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

    # Mercurial packages should DEPEND on mercurial-native
    elif "hg://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')

    # OSC packages should DEPEND on osc-native
    elif "osc://" in srcuri:
        d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

    # *.lz4 should depend on lz4-native for unpacking
    # Not endswith because of "*.patch.lz4;patch=1". Need bb.fetch.decodeurl in future
    if '.lz4' in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

    # *.xz should depend on xz-native for unpacking
    # Not endswith because of "*.patch.xz;patch=1". Need bb.fetch.decodeurl in future
    if '.xz' in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

    # unzip-native should already be staged before unpacking ZIP recipes
    if ".zip" in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

    # file is needed by rpm2cpio.sh
    if ".src.rpm" in srcuri:
        d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH', True)
    pkg_arch = d.getVar('PACKAGE_ARCH', True)

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for URLs with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH', True) or '').split(':')
        machine = d.getVar('MACHINE', True)
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if len(paths) != 0:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES', True).split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
}

addtask cleansstate after do_clean
python do_cleansstate() {
    sstate_clean_cachefiles(d)
}

addtask cleanall after do_cleansstate
python do_cleanall() {
    src_uri = (d.getVar('SRC_URI', True) or "").split()
    if len(src_uri) == 0:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.clean()
    except bb.fetch2.BBFetchException as e:
        raise bb.build.FuncFailed(e)
}
do_cleanall[nostamp] = "1"


EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
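
To illustrate the PACKAGECONFIG convention handled by the anonymous python above (a hypothetical recipe fragment, not part of this commit):

# hypothetical recipe fragment
PACKAGECONFIG ??= "ssl"
# fields: enable option, disable option, build-time depends, runtime depends
PACKAGECONFIG[ssl] = "--enable-ssl,--disable-ssl,openssl,"
PACKAGECONFIG[gtk] = "--enable-gtk,--disable-gtk,gtk+,"

When "ssl" is listed in PACKAGECONFIG, --enable-ssl is appended to EXTRA_OECONF (or EXTRA_OECMAKE for cmake recipes) and openssl to DEPENDS; otherwise --disable-ssl is appended instead.
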
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
new file mode 100644
index 0000000000..a52b75be5c
--- /dev/null
+++ b/meta/classes/bin_package.bbclass
@@ -0,0 +1,36 @@
#
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Common variables and tasks for binary package recipes.
# Basic principle:
# * The files have been unpacked to ${S} by base.bbclass
# * Skip do_configure and do_compile
# * Use do_install to install the files to ${D}
#
# Note:
# The "subdir" parameter in the SRC_URI is useful when the input package
# is rpm, ipk, deb and so on, for example:
#
# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
#
# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
# they would be in ${WORKDIR}.
#

# Skip the unwanted steps
do_configure[noexec] = "1"
do_compile[noexec] = "1"

# Install the files to ${D}
bin_package_do_install () {
    # Do it carefully
    [ -d "${S}" ] || exit 1
    cd ${S} || exit 1
    tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
        | tar --no-same-owner -xpf - -C ${D}
}

FILES_${PN} = "/"

EXPORT_FUNCTIONS do_install
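
A sketch of a recipe using this class, building on the subdir example in the header above (hypothetical names):

# hypothetical recipe: foo-bin_1.0.bb
LICENSE = "CLOSED"
SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
S = "${WORKDIR}/foo-1.0"

inherit bin_package
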
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
new file mode 100644
index 0000000000..27f904eb42
--- /dev/null
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -0,0 +1,15 @@
1#
2# Class to disable binconfig files instead of installing them
3#
4
5# The list of scripts which should be disabled.
6BINCONFIG ?= ""
7
8FILES_${PN}-dev += "${bindir}/*-config"
9
10do_install_append () {
11 for x in ${BINCONFIG}; do
12 echo "#!/bin/sh" > ${D}$x
13 echo "exit 1" >> ${D}$x
14 done
15}
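
A hedged usage sketch for binconfig-disabled.bbclass (the script name is hypothetical): the listed scripts are installed as stubs that exit non-zero instead of leaking host paths:

    inherit binconfig-disabled
    BINCONFIG = "${bindir}/foo-config"
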
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
new file mode 100644
index 0000000000..cbc4173601
--- /dev/null
+++ b/meta/classes/binconfig.bbclass
@@ -0,0 +1,63 @@
1FILES_${PN}-dev += "${bindir}/*-config"
2
3# The namespaces can clash here, hence the two-step replace
4def get_binconfig_mangle(d):
5 s = "-e ''"
6 if not bb.data.inherits_class('native', d):
7 optional_quote = r"\(\"\?\)"
8 s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
9 s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
10 s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
11 s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
12 s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
13 s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
14 s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
15 s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
16 s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
17 s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
18 s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
19 s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
20 s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
21 s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
22 s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
23 s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
24 if bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d):
25 s += bb.data.getVar("OE_BINCONFIG_EXTRA_MANGLE", d)
26
27 return s
28
29BINCONFIG_GLOB ?= "*-config"
30
31PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
32
33binconfig_package_preprocess () {
34 for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
35 sed -i \
36 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
37 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
38 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
39 -e 's:${STAGING_DATADIR}:${datadir}:' \
40 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
41 $config
42 done
43 for lafile in `find ${PKGD} -name "*.la"` ; do
44 sed -i \
45 -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
46 -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
47 -e 's:${STAGING_INCDIR}:${includedir}:g;' \
48 -e 's:${STAGING_DATADIR}:${datadir}:' \
49 -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
50 $lafile
51 done
52}
53
54SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
55
56binconfig_sysroot_preprocess () {
57 for config in `find ${S} -name '${BINCONFIG_GLOB}'` `find ${B} -name '${BINCONFIG_GLOB}'`; do
58 configname=`basename $config`
59 install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
60 sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
61 chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
62 done
63}
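
For illustration (the script name and paths are examples, assuming libdir is /usr/lib on the target), the two preprocessing steps above act on a foo-config script roughly as follows:

    # in the source/build tree:                libdir=/usr/lib
    # sysroot copy, after the mangle:          libdir=${STAGING_LIBDIR}
    # packaged copy, after package preprocess: libdir=/usr/lib again
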
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
new file mode 100644
index 0000000000..a0141a82c0
--- /dev/null
+++ b/meta/classes/blacklist.bbclass
@@ -0,0 +1,45 @@
1# Anonymous support class, originally from Angstrom
2#
3# To use the blacklist, a distribution should include this
4# class in the INHERIT_DISTRO
5#
6# No longer use ANGSTROM_BLACKLIST, instead use a table of
7# recipes in PNBLACKLIST
8#
9# Features:
10#
11# * To add a package to the blacklist, set:
12# PNBLACKLIST[pn] = "message"
13#
14
15# Cope with PNBLACKLIST flags for multilib case
16addhandler blacklist_multilib_eventhandler
17blacklist_multilib_eventhandler[eventmask] = "bb.event.ConfigParsed"
18python blacklist_multilib_eventhandler() {
19 multilibs = e.data.getVar('MULTILIBS', True)
20 if not multilibs:
21 return
22
23 # this block has been copied from base.bbclass so keep it in sync
24 prefixes = []
25 for ext in multilibs.split():
26 eext = ext.split(':')
27 if len(eext) > 1 and eext[0] == 'multilib':
28 prefixes.append(eext[1])
29
30 blacklists = e.data.getVarFlags('PNBLACKLIST') or {}
31 for pkg, reason in blacklists.items():
32 if pkg.endswith(("-native", "-crosssdk")) or pkg.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in pkg:
33 continue
34 for p in prefixes:
35 newpkg = p + "-" + pkg
36 if not e.data.getVarFlag('PNBLACKLIST', newpkg, True):
37 e.data.setVarFlag('PNBLACKLIST', newpkg, reason)
38}
39
40python () {
41 blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
42
43 if blacklist:
44 raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
45}
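
A hedged configuration sketch (recipe name and message are placeholders); with the class inherited via INHERIT_DISTRO or local.conf, a blacklisted recipe is skipped like so:

    INHERIT += "blacklist"
    PNBLACKLIST[foo] = "Does not build for this machine yet"
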
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
new file mode 100644
index 0000000000..09da032049
--- /dev/null
+++ b/meta/classes/boot-directdisk.bbclass
@@ -0,0 +1,191 @@
1# boot-directdisk.bbclass
2# (loosely based on bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
3#
4# Create an image which can be placed directly onto a harddisk using dd and then
5# booted.
6#
7# This uses syslinux. extlinux would have been nice but required the ext2/3
8# partition to be mounted. grub requires running itself as part of the install
9# process.
10#
11# The end result is a 512-byte boot sector populated with an MBR and partition table
12# followed by an msdos FAT16 partition containing syslinux and a linux kernel,
13# completed by the ext2/3 rootfs.
14#
15# We have to push the msdos partition table size > 16MB so FAT16 is used, as parted
16# won't touch FAT12 partitions.
17
18# External variables needed
19
20# ${ROOTFS} - the rootfs image to incorporate
21
22do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
23 syslinux:do_populate_sysroot \
24 syslinux-native:do_populate_sysroot \
25 parted-native:do_populate_sysroot \
26 mtools-native:do_populate_sysroot "
27
28PACKAGES = " "
29EXCLUDE_FROM_WORLD = "1"
30
31BOOTDD_VOLUME_ID ?= "boot"
32BOOTDD_EXTRA_SPACE ?= "16384"
33
34EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
35EFI_PROVIDER ?= "grub-efi"
36EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
37
38# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
39# contain "efi". This way legacy is supported by default if neither is
40# specified, maintaining the original behavior.
41def pcbios(d):
42 pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
43 if pcbios == "0":
44 pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
45 return pcbios
46
47def pcbios_class(d):
48 if d.getVar("PCBIOS", True) == "1":
49 return "syslinux"
50 return ""
51
52PCBIOS = "${@pcbios(d)}"
53PCBIOS_CLASS = "${@pcbios_class(d)}"
54
55inherit ${PCBIOS_CLASS}
56inherit ${EFI_CLASS}
57
58# Get the build_syslinux_cfg() function from the syslinux class
59
60AUTO_SYSLINUXCFG = "1"
61DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
62SYSLINUX_ROOT ?= "root=/dev/sda2"
63SYSLINUX_TIMEOUT ?= "10"
64
65IS_VMDK = '${@bb.utils.contains("IMAGE_FSTYPES", "vmdk", "true", "false", d)}'
66
67boot_direct_populate() {
68 dest=$1
69 install -d $dest
70
71 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
72 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $dest/vmlinuz
73
74 # initrd is made of concatenation of multiple filesystem images
75 if [ -n "${INITRD}" ]; then
76 rm -f $dest/initrd
77 for fs in ${INITRD}
78 do
79 if [ -s "${fs}" ]; then
80 cat ${fs} >> $dest/initrd
81 else
82 bbfatal "${fs} is invalid. initrd image creation failed."
83 fi
84 done
85 chmod 0644 $dest/initrd
86 fi
87}
88
89build_boot_dd() {
90 HDDDIR="${S}/hdd/boot"
91 HDDIMG="${S}/hdd.image"
92 IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
93
94 boot_direct_populate $HDDDIR
95
96 if [ "${PCBIOS}" = "1" ]; then
97 syslinux_hddimg_populate $HDDDIR
98 fi
99 if [ "${EFI}" = "1" ]; then
100 efi_hddimg_populate $HDDDIR
101 fi
102
103 if [ "${IS_VMDK}" = "true" ]; then
104 if [ "x${AUTO_SYSLINUXMENU}" = "x1" ] ; then
105 install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 $HDDDIR/${SYSLINUXDIR}/
106 if [ "x${SYSLINUX_SPLASH}" != "x" ] ; then
107 install -m 0644 ${SYSLINUX_SPLASH} $HDDDIR/${SYSLINUXDIR}/splash.lss
108 fi
109 fi
110 fi
111
112 BLOCKS=`du -bks $HDDDIR | cut -f 1`
113 BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
114
115 # Ensure total sectors is an integral number of sectors per
116 # track or mcopy will complain. Sectors are 512 bytes, and we
117 # generate images with 32 sectors per track. This calculation is
118 # done in blocks, thus the mod by 16 instead of 32.
119 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
120
121 mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
122 mcopy -i $HDDIMG -s $HDDDIR/* ::/
123
124 if [ "${PCBIOS}" = "1" ]; then
125 syslinux_hdddirect_install $HDDIMG
126 fi
127 chmod 644 $HDDIMG
128
129 ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
130 TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
131 END1=`expr $BLOCKS \* 1024`
132 END2=`expr $END1 + 512`
133 END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
134
135 echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
136 rm -rf $IMAGE
137 dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
138
139 parted $IMAGE mklabel msdos
140 parted $IMAGE mkpart primary fat16 0 ${END1}B
141 parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
142 parted $IMAGE set 1 boot on
143
144 parted $IMAGE print
145
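    # Write DISK_SIGNATURE into the MBR at offset 440 in little-endian byte
    # order: fold/tac/paste reverse the hex string two digits at a time and
    # sed turns each pair into a \xNN escape for awk's printf.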
146 awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
147 dd of=$IMAGE bs=1 seek=440 conv=notrunc
148
149 OFFSET=`expr $END2 / 512`
150 if [ "${PCBIOS}" = "1" ]; then
151 dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
152 fi
153
154 dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
155 dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
156
157 cd ${DEPLOY_DIR_IMAGE}
158 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
159 ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
160}
161
162python do_bootdirectdisk() {
163 validate_disk_signature(d)
164 if d.getVar("PCBIOS", True) == "1":
165 bb.build.exec_func('build_syslinux_cfg', d)
166 if d.getVar("EFI", True) == "1":
167 bb.build.exec_func('build_efi_cfg', d)
168 bb.build.exec_func('build_boot_dd', d)
169}
170
171def generate_disk_signature():
172 import uuid
173
174 signature = str(uuid.uuid4())[:8]
175
176 if signature != '00000000':
177 return signature
178 else:
179 return 'ffffffff'
180
181def validate_disk_signature(d):
182 import re
183
184 disk_signature = d.getVar("DISK_SIGNATURE", True)
185
186 if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
187 bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
188
189DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
190
191addtask bootdirectdisk before do_build
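
A hedged usage sketch (the target device is a placeholder; dd will destroy whatever is on it): the resulting image is written to a disk exactly as the header describes:

    dd if=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect of=/dev/sdX bs=4M
    sync
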
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
new file mode 100644
index 0000000000..859d517dbd
--- /dev/null
+++ b/meta/classes/bootimg.bbclass
@@ -0,0 +1,267 @@
1# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
2# Released under the MIT license (see packages/COPYING)
3
4# Creates a bootable image using syslinux, your kernel and an optional
5# initrd
6
7#
8# End result is two things:
9#
10# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
11# an initrd and a rootfs image. These can be written to harddisks directly and
12# also booted on USB flash disks (write them there with dd).
13#
14# 2. A CD .iso image
15
16# The boot process is that the initrd boots and processes which label was selected
17# in syslinux. Actions based on the label are then performed (e.g. installing to
18# an hdd).
19
20# External variables (also used by syslinux.bbclass)
21# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
22# ${COMPRESSISO} - transparently compress the ISO, reducing its size by ~40%, if set to 1
23# ${NOISO} - skip building the ISO image if set to 1
24# ${NOHDD} - skip building the HDD image if set to 1
25# ${HDDIMG_ID} - FAT image volume-id
26# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
27
28do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
29 mtools-native:do_populate_sysroot \
30 cdrtools-native:do_populate_sysroot \
31 ${@oe.utils.ifelse(d.getVar('COMPRESSISO'),'zisofs-tools-native:do_populate_sysroot','')}"
32
33PACKAGES = " "
34EXCLUDE_FROM_WORLD = "1"
35
36HDDDIR = "${S}/hddimg"
37ISODIR = "${S}/iso"
38EFIIMGDIR = "${S}/efi_img"
39COMPACT_ISODIR = "${S}/iso.z"
40COMPRESSISO ?= "0"
41
42BOOTIMG_VOLUME_ID ?= "boot"
43BOOTIMG_EXTRA_SPACE ?= "512"
44
45EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
46EFI_PROVIDER ?= "grub-efi"
47EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
48
49# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
50# contain "efi". This way legacy is supported by default if neither is
51# specified, maintaining the original behavior.
52def pcbios(d):
53 pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
54 if pcbios == "0":
55 pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
56 return pcbios
57
58PCBIOS = "${@pcbios(d)}"
59
60# syslinux is required for the isohybrid command and the boot catalog
61inherit syslinux
62inherit ${EFI_CLASS}
63
64populate() {
65 DEST=$1
66 install -d ${DEST}
67
68 # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
69 install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
70
71 # initrd is made of concatenation of multiple filesystem images
72 if [ -n "${INITRD}" ]; then
73 rm -f ${DEST}/initrd
74 for fs in ${INITRD}
75 do
76 if [ -s "${fs}" ]; then
77 cat ${fs} >> ${DEST}/initrd
78 else
79 bbfatal "${fs} is invalid. initrd image creation failed."
80 fi
81 done
82 chmod 0644 ${DEST}/initrd
83 fi
84
85 if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
86 install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
87 fi
88
89}
90
91build_iso() {
92 # Only create an ISO if we have an INITRD and NOISO was not set
93 if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
94 bbnote "ISO image will not be created."
95 return
96 fi
97 # ${INITRD} is a list of multiple filesystem images
98 for fs in ${INITRD}
99 do
100 if [ ! -s "${fs}" ]; then
101 bbnote "ISO image will not be created. ${fs} is invalid."
102 return
103 fi
104 done
105
106
107 populate ${ISODIR}
108
109 if [ "${PCBIOS}" = "1" ]; then
110 syslinux_iso_populate ${ISODIR}
111 fi
112 if [ "${EFI}" = "1" ]; then
113 efi_iso_populate ${ISODIR}
114 build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
115 fi
116
117 # EFI only
118 if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
119 # Work around a bug in isohybrid where it requires isolinux.bin
120 # in the boot catalog, even though it is not used
121 mkdir -p ${ISODIR}/${ISOLINUXDIR}
122 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
123 fi
124
125 if [ "${COMPRESSISO}" = "1" ] ; then
126 # create compact directory, compress iso
127 mkdir -p ${COMPACT_ISODIR}
128 mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
129
130 # move compact iso to iso, then remove compact directory
131 mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
132 rm -Rf ${COMPACT_ISODIR}
133 mkisofs_compress_opts="-R -z -D -l"
134 else
135 mkisofs_compress_opts="-r"
136 fi
137
138 if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
139 # PCBIOS only media
140 mkisofs -V ${BOOTIMG_VOLUME_ID} \
141 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
142 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
143 $mkisofs_compress_opts \
144 ${MKISOFS_OPTIONS} ${ISODIR}
145 else
146 # EFI only OR EFI+PCBIOS
147 mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
148 -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
149 -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
150 $mkisofs_compress_opts ${MKISOFS_OPTIONS} \
151 -eltorito-alt-boot -eltorito-platform efi \
152 -b efi.img -no-emul-boot \
153 ${ISODIR}
154 isohybrid_args="-u"
155 fi
156
157 isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
158
159 cd ${DEPLOY_DIR_IMAGE}
160 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
161 ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
162}
163
164build_fat_img() {
165 FATSOURCEDIR=$1
166 FATIMG=$2
167
168 # Calculate the size required for the final image including the
169 # data and filesystem overhead.
170 # Sectors: 512 bytes
171 # Blocks: 1024 bytes
172
173 # Determine the sector count just for the data
174 SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
175
176 # Account for the filesystem overhead. This includes directory
177 # entries in the clusters as well as the FAT itself.
178 # Assumptions:
179 # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
180 # padding will be minimal on those smaller images and not
181 # worth the logic here to calculate the smaller FAT sizes)
182 # < 16 entries per directory
183 # 8.3 filenames only
184
185 # 32 bytes per dir entry
186 DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
187 # 32 bytes for every end-of-directory dir entry
188 DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
189 # 4 bytes per FAT entry per sector of data
190 FAT_BYTES=$(expr $SECTORS \* 4)
191 # 4 bytes per FAT entry per end-of-cluster list
192 FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
193
194 # Use a ceiling function to determine FS overhead in sectors
195 DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
196 # There are two FATs on the image
197 FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
198 SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
199
200 # Determine the final size in blocks accounting for some padding
201 BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
202
203 # Ensure total sectors is an integral number of sectors per
204 # track or mcopy will complain. Sectors are 512 bytes, and we
205 # generate images with 32 sectors per track. This calculation is
206 # done in blocks, thus the mod by 16 instead of 32.
207 BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
208
209 # mkdosfs will sometimes use FAT16 when it is not appropriate,
210 # resulting in a boot failure from SYSLINUX. Use FAT32 for
211 # images larger than 512MB, otherwise let mkdosfs decide.
212 if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
213 FATSIZE="-F 32"
214 fi
215
216 if [ -z "${HDDIMG_ID}" ]; then
217 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
218 ${BLOCKS}
219 else
220 mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
221 ${BLOCKS} -i ${HDDIMG_ID}
222 fi
223
224 # Copy FATSOURCEDIR recursively into the image file directly
225 mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
226}
227
228build_hddimg() {
229 # Create an HDD image
230 if [ "${NOHDD}" != "1" ] ; then
231 populate ${HDDDIR}
232
233 if [ "${PCBIOS}" = "1" ]; then
234 syslinux_hddimg_populate ${HDDDIR}
235 fi
236 if [ "${EFI}" = "1" ]; then
237 efi_hddimg_populate ${HDDDIR}
238 fi
239
240 build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
241
242 if [ "${PCBIOS}" = "1" ]; then
243 syslinux_hddimg_install
244 fi
245
246 chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
247
248 cd ${DEPLOY_DIR_IMAGE}
249 rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
250 ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
251 fi
252}
253
254python do_bootimg() {
255 if d.getVar("PCBIOS", True) == "1":
256 bb.build.exec_func('build_syslinux_cfg', d)
257 if d.getVar("EFI", True) == "1":
258 bb.build.exec_func('build_efi_cfg', d)
259 bb.build.exec_func('build_hddimg', d)
260 bb.build.exec_func('build_iso', d)
261}
262
263IMAGE_TYPEDEP_iso = "ext3"
264IMAGE_TYPEDEP_hddimg = "ext3"
265IMAGE_TYPES_MASKED += "iso hddimg"
266
267addtask bootimg before do_build
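
As a worked example of the sizing arithmetic in build_fat_img above (all numbers are illustrative):

    # 10000 KiB of data                -> SECTORS   = 20000
    # 100 files in 10 subdirectories   -> DIR_BYTES = 110*32 + 10*32 = 3840
    # FAT entries                      -> FAT_BYTES = 20000*4 + 10*4 = 80040
    # DIR_SECTORS = ceil(3840/512)      = 8
    # FAT_SECTORS = ceil(80040/512) * 2 = 314   (two FATs)
    # SECTORS     = 20000 + 8 + 314     = 20322
    # BLOCKS      = 20322/2 + 512       = 10673 -> rounded up to 10688,
    #               the next multiple of 16 blocks (32 sectors per track)
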
diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass
new file mode 100644
index 0000000000..3fc8956428
--- /dev/null
+++ b/meta/classes/bugzilla.bbclass
@@ -0,0 +1,187 @@
1#
2# Small event handler to automatically open URLs and file
3# bug reports at a Bugzilla of your choice.
4# It uses the XML-RPC interface, so you must have it enabled.
5#
6# Before using it you must define BUGZILLA_USER and BUGZILLA_PASS credentials,
7# BUGZILLA_XMLRPC - URI of xmlrpc.cgi,
8# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in the BTS for build bugs,
9# BUGZILLA_VERSION - version against which to report new bugs
10#
11
12def bugzilla_find_bug_report(debug_file, server, args, bugname):
13 args['summary'] = bugname
14 bugs = server.Bug.search(args)
15 if len(bugs['bugs']) == 0:
16 print >> debug_file, "Bugs not found"
17 return (False,None)
18 else: # silently pick the first result
19 print >> debug_file, "Result of bug search is "
20 print >> debug_file, bugs
21 status = bugs['bugs'][0]['status']
22 id = bugs['bugs'][0]['id']
23 return (not status in ["CLOSED", "RESOLVED", "VERIFIED"],id)
24
25def bugzilla_file_bug(debug_file, server, args, name, text, version):
26 args['summary'] = name
27 args['comment'] = text
28 args['version'] = version
29 args['op_sys'] = 'Linux'
30 args['platform'] = 'Other'
31 args['severity'] = 'normal'
32 args['priority'] = 'Normal'
33 try:
34 return server.Bug.create(args)['id']
35 except Exception, e:
36 print >> debug_file, repr(e)
37 return None
38
39def bugzilla_reopen_bug(debug_file, server, args, bug_number):
40 args['ids'] = [bug_number]
41 args['status'] = "CONFIRMED"
42 try:
43 server.Bug.update(args)
44 return True
45 except Exception, e:
46 print >> debug_file, repr(e)
47 return False
48
49def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
50 args['ids'] = [bug_number]
51 args['file_name'] = file_name
52 args['summary'] = logdescription
53 args['content_type'] = "text/plain"
54 args['data'] = log
55 args['comment'] = text
56 try:
57 server.Bug.add_attachment(args)
58 return True
59 except Exception, e:
60 print >> debug_file, repr(e)
61 return False
62
63def bugzilla_add_comment(debug_file, server, args, bug_number, text):
64 args['id'] = bug_number
65 args['comment'] = text
66 try:
67 server.Bug.add_comment(args)
68 return True
69 except Exception, e:
70 print >> debug_file, repr(e)
71 return False
72
73addhandler bugzilla_eventhandler
74bugzilla_eventhandler[eventmask] = "bb.event.MsgNote bb.build.TaskFailed"
75python bugzilla_eventhandler() {
76 import glob
77 import base64, xmlrpclib, httplib
78
79 class ProxiedTransport(xmlrpclib.Transport):
80 def __init__(self, proxy, use_datetime = 0):
81 xmlrpclib.Transport.__init__(self, use_datetime)
82 self.proxy = proxy
83 self.user = None
84 self.password = None
85
86 def set_user(self, user):
87 self.user = user
88
89 def set_password(self, password):
90 self.password = password
91
92 def make_connection(self, host):
93 self.realhost = host
94 return httplib.HTTP(self.proxy)
95
96 def send_request(self, connection, handler, request_body):
97 connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
98 if self.user != None:
99 if self.password != None:
100 auth = "%s:%s" % (self.user, self.password)
101 else:
102 auth = self.user
103 connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
104
105 event = e
106 data = e.data
107 name = bb.event.getName(event)
108 if name == "MsgNote":
109 # avoid recursion
110 return
111
112 if name == "TaskFailed":
113 xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
114 user = data.getVar("BUGZILLA_USER", True)
115 passw = data.getVar("BUGZILLA_PASS", True)
116 product = data.getVar("BUGZILLA_PRODUCT", True)
117 compon = data.getVar("BUGZILLA_COMPONENT", True)
118 version = data.getVar("BUGZILLA_VERSION", True)
119
120 proxy = data.getVar('http_proxy', True )
121 if (proxy):
122 import urllib2
123 s, u, p, hostport = urllib2._parse_proxy(proxy)
124 transport = ProxiedTransport(hostport)
125 else:
126 transport = None
127
128 server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
129 args = {
130 'Bugzilla_login': user,
131 'Bugzilla_password': passw,
132 'product': product,
133 'component': compon}
134
135 # evil hack to figure out what is going on
136 debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
137
138 file = None
139 bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
140 "pv" : data.getVar("PV", True),
141 }
142 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
143 text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
144 if len(log_file) != 0:
145 print >> debug_file, "Adding log file %s" % log_file[0]
146 file = open(log_file[0], 'r')
147 log = file.read()
148 file.close()
149 else:
150 print >> debug_file, "No log file found for the glob"
151 log = None
152
153 (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
154 print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
155
156 # File the bug if it does not exist yet; reopen it if it exists but is closed
157 if not bug_number:
158 bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
159 if not bug_number:
160 print >> debug_file, "Couldn't acquire a new bug_number, filing a bug report failed"
161 else:
162 print >> debug_file, "The new bug_number: '%s'" % bug_number
163 elif not bug_open:
164 if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
165 print >> debug_file, "Failed to reopen the bug #%s" % bug_number
166 else:
167 print >> debug_file, "Reopened the bug #%s" % bug_number
168
169 if bug_number and log:
170 print >> debug_file, "The bug is known as '%s'" % bug_number
171 desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
172 if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
173 print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
174 else:
175 print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
176 else:
177 print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
178 if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
179 print >> debug_file, "Failed to add a comment with the build log for bug #%s" % bug_number
180 else:
181 print >> debug_file, "Added a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
182
183 # store bug number for oestats-client
184 if bug_number:
185 data.setVar('OESTATS_BUG_NUMBER', bug_number)
186}
187
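A hedged configuration sketch for the variables above (server URL, credentials and product data are placeholders):

    INHERIT += "bugzilla"
    BUGZILLA_XMLRPC = "https://bugzilla.example.com/xmlrpc.cgi"
    BUGZILLA_USER = "builder@example.com"
    BUGZILLA_PASS = "secret"
    BUGZILLA_PRODUCT = "MyProduct"
    BUGZILLA_COMPONENT = "MyComponent"
    BUGZILLA_VERSION = "1.0"
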
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
new file mode 100644
index 0000000000..8b5d5c214c
--- /dev/null
+++ b/meta/classes/buildhistory.bbclass
@@ -0,0 +1,696 @@
1#
2# Records history of build output in order to detect regressions
3#
4# Based in part on testlab.bbclass and packagehistory.bbclass
5#
6# Copyright (C) 2011-2014 Intel Corporation
7# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
8#
9
10BUILDHISTORY_FEATURES ?= "image package sdk"
11BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
12BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
13BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
14BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}/${IMAGE_BASENAME}"
15BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
16BUILDHISTORY_COMMIT ?= "0"
17BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
18BUILDHISTORY_PUSH_REPO ?= ""
19
20SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
21# We want to avoid influencing the signatures of sstate tasks - first the function itself:
22sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
23# then the value added to SSTATEPOSTINSTFUNCS:
24SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
25
26#
27# Write out metadata about this package for comparison when writing future packages
28#
29python buildhistory_emit_pkghistory() {
30 if not d.getVar('BB_CURRENTTASK', True) in ['packagedata', 'packagedata_setscene']:
31 return 0
32
33 if not "package" in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
34 return 0
35
36 import re
37 import json
38 import errno
39
40 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
41
42 class RecipeInfo:
43 def __init__(self, name):
44 self.name = name
45 self.pe = "0"
46 self.pv = "0"
47 self.pr = "r0"
48 self.depends = ""
49 self.packages = ""
50 self.srcrev = ""
51
52
53 class PackageInfo:
54 def __init__(self, name):
55 self.name = name
56 self.pe = "0"
57 self.pv = "0"
58 self.pr = "r0"
59 # pkg/pkge/pkgv/pkgr should be empty because we want to be able to default them
60 self.pkg = ""
61 self.pkge = ""
62 self.pkgv = ""
63 self.pkgr = ""
64 self.size = 0
65 self.depends = ""
66 self.rprovides = ""
67 self.rdepends = ""
68 self.rrecommends = ""
69 self.rsuggests = ""
70 self.rreplaces = ""
71 self.rconflicts = ""
72 self.files = ""
73 self.filelist = ""
74 # Variables that need to be written to their own separate file
75 self.filevars = dict.fromkeys(['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'])
76
77 # Should check PACKAGES here to see if anything removed
78
79 def readPackageInfo(pkg, histfile):
80 pkginfo = PackageInfo(pkg)
81 with open(histfile, "r") as f:
82 for line in f:
83 lns = line.split('=')
84 name = lns[0].strip()
85 value = lns[1].strip(" \t\r\n").strip('"')
86 if name == "PE":
87 pkginfo.pe = value
88 elif name == "PV":
89 pkginfo.pv = value
90 elif name == "PR":
91 pkginfo.pr = value
92 elif name == "PKG":
93 pkginfo.pkg = value
94 elif name == "PKGE":
95 pkginfo.pkge = value
96 elif name == "PKGV":
97 pkginfo.pkgv = value
98 elif name == "PKGR":
99 pkginfo.pkgr = value
100 elif name == "RPROVIDES":
101 pkginfo.rprovides = value
102 elif name == "RDEPENDS":
103 pkginfo.rdepends = value
104 elif name == "RRECOMMENDS":
105 pkginfo.rrecommends = value
106 elif name == "RSUGGESTS":
107 pkginfo.rsuggests = value
108 elif name == "RREPLACES":
109 pkginfo.rreplaces = value
110 elif name == "RCONFLICTS":
111 pkginfo.rconflicts = value
112 elif name == "PKGSIZE":
113 pkginfo.size = long(value)
114 elif name == "FILES":
115 pkginfo.files = value
116 elif name == "FILELIST":
117 pkginfo.filelist = value
118 # Apply defaults
119 if not pkginfo.pkg:
120 pkginfo.pkg = pkginfo.name
121 if not pkginfo.pkge:
122 pkginfo.pkge = pkginfo.pe
123 if not pkginfo.pkgv:
124 pkginfo.pkgv = pkginfo.pv
125 if not pkginfo.pkgr:
126 pkginfo.pkgr = pkginfo.pr
127 return pkginfo
128
129 def getlastpkgversion(pkg):
130 try:
131 histfile = os.path.join(pkghistdir, pkg, "latest")
132 return readPackageInfo(pkg, histfile)
133 except EnvironmentError:
134 return None
135
136 def sortpkglist(string):
137 pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0)
138 pkglist = [p.group(0) for p in pkgiter]
139 pkglist.sort()
140 return ' '.join(pkglist)
141
142 def sortlist(string):
143 items = string.split(' ')
144 items.sort()
145 return ' '.join(items)
146
147 pn = d.getVar('PN', True)
148 pe = d.getVar('PE', True) or "0"
149 pv = d.getVar('PV', True)
150 pr = d.getVar('PR', True)
151
152 pkgdata_dir = d.getVar('PKGDATA_DIR', True)
153 packages = ""
154 try:
155 with open(os.path.join(pkgdata_dir, pn)) as f:
156 for line in f.readlines():
157 if line.startswith('PACKAGES: '):
158 packages = squashspaces(line.split(': ', 1)[1])
159 break
160 except IOError as e:
161 if e.errno == errno.ENOENT:
162 # Probably a -cross recipe, just ignore
163 return 0
164 else:
165 raise
166
167 packagelist = packages.split()
168 if not os.path.exists(pkghistdir):
169 bb.utils.mkdirhier(pkghistdir)
170 else:
171 # Remove files for packages that no longer exist
172 for item in os.listdir(pkghistdir):
173 if item != "latest" and item != "latest_srcrev":
174 if item not in packagelist:
175 subdir = os.path.join(pkghistdir, item)
176 for subfile in os.listdir(subdir):
177 os.unlink(os.path.join(subdir, subfile))
178 os.rmdir(subdir)
179
180 rcpinfo = RecipeInfo(pn)
181 rcpinfo.pe = pe
182 rcpinfo.pv = pv
183 rcpinfo.pr = pr
184 rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
185 rcpinfo.packages = packages
186 write_recipehistory(rcpinfo, d)
187
188 pkgdest = d.getVar('PKGDEST', True)
189 for pkg in packagelist:
190 pkgdata = {}
191 with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
192 for line in f.readlines():
193 item = line.rstrip('\n').split(': ', 1)
194 key = item[0]
195 if key.endswith('_' + pkg):
196 key = key[:-len(pkg)-1]
197 pkgdata[key] = item[1].decode('utf-8').decode('string_escape')
198
199 pkge = pkgdata.get('PKGE', '0')
200 pkgv = pkgdata['PKGV']
201 pkgr = pkgdata['PKGR']
202 #
203 # Find out what the last version was
204 # Make sure the version did not decrease
205 #
206 lastversion = getlastpkgversion(pkg)
207 if lastversion:
208 last_pkge = lastversion.pkge
209 last_pkgv = lastversion.pkgv
210 last_pkgr = lastversion.pkgr
211 r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
212 if r < 0:
213 msg = "Package version for package %s went backwards, which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
214 package_qa_handle_error("version-going-backwards", msg, d)
215
216 pkginfo = PackageInfo(pkg)
217 # Apparently the version can be different on a per-package basis (see Python)
218 pkginfo.pe = pkgdata.get('PE', '0')
219 pkginfo.pv = pkgdata['PV']
220 pkginfo.pr = pkgdata['PR']
221 pkginfo.pkg = pkgdata['PKG']
222 pkginfo.pkge = pkge
223 pkginfo.pkgv = pkgv
224 pkginfo.pkgr = pkgr
225 pkginfo.rprovides = sortpkglist(squashspaces(pkgdata.get('RPROVIDES', "")))
226 pkginfo.rdepends = sortpkglist(squashspaces(pkgdata.get('RDEPENDS', "")))
227 pkginfo.rrecommends = sortpkglist(squashspaces(pkgdata.get('RRECOMMENDS', "")))
228 pkginfo.rsuggests = sortpkglist(squashspaces(pkgdata.get('RSUGGESTS', "")))
229 pkginfo.rreplaces = sortpkglist(squashspaces(pkgdata.get('RREPLACES', "")))
230 pkginfo.rconflicts = sortpkglist(squashspaces(pkgdata.get('RCONFLICTS', "")))
231 pkginfo.files = squashspaces(pkgdata.get('FILES', ""))
232 for filevar in pkginfo.filevars:
233 pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
234
235 # Gather information about packaged files
236 val = pkgdata.get('FILES_INFO', '')
237 dictval = json.loads(val)
238 filelist = dictval.keys()
239 filelist.sort()
240 pkginfo.filelist = " ".join(filelist)
241
242 pkginfo.size = int(pkgdata['PKGSIZE'])
243
244 write_pkghistory(pkginfo, d)
245}
246
247
248def write_recipehistory(rcpinfo, d):
249 import codecs
250
251 bb.debug(2, "Writing recipe history")
252
253 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
254
255 infofile = os.path.join(pkghistdir, "latest")
256 with codecs.open(infofile, "w", encoding='utf8') as f:
257 if rcpinfo.pe != "0":
258 f.write(u"PE = %s\n" % rcpinfo.pe)
259 f.write(u"PV = %s\n" % rcpinfo.pv)
260 f.write(u"PR = %s\n" % rcpinfo.pr)
261 f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
262 f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
263
264
265def write_pkghistory(pkginfo, d):
266 import codecs
267
268 bb.debug(2, "Writing package history for package %s" % pkginfo.name)
269
270 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
271
272 pkgpath = os.path.join(pkghistdir, pkginfo.name)
273 if not os.path.exists(pkgpath):
274 bb.utils.mkdirhier(pkgpath)
275
276 infofile = os.path.join(pkgpath, "latest")
277 with codecs.open(infofile, "w", encoding='utf8') as f:
278 if pkginfo.pe != "0":
279 f.write(u"PE = %s\n" % pkginfo.pe)
280 f.write(u"PV = %s\n" % pkginfo.pv)
281 f.write(u"PR = %s\n" % pkginfo.pr)
282
283 pkgvars = {}
284 pkgvars['PKG'] = pkginfo.pkg if pkginfo.pkg != pkginfo.name else ''
285 pkgvars['PKGE'] = pkginfo.pkge if pkginfo.pkge != pkginfo.pe else ''
286 pkgvars['PKGV'] = pkginfo.pkgv if pkginfo.pkgv != pkginfo.pv else ''
287 pkgvars['PKGR'] = pkginfo.pkgr if pkginfo.pkgr != pkginfo.pr else ''
288 for pkgvar in pkgvars:
289 val = pkgvars[pkgvar]
290 if val:
291 f.write(u"%s = %s\n" % (pkgvar, val))
292
293 f.write(u"RPROVIDES = %s\n" % pkginfo.rprovides)
294 f.write(u"RDEPENDS = %s\n" % pkginfo.rdepends)
295 f.write(u"RRECOMMENDS = %s\n" % pkginfo.rrecommends)
296 if pkginfo.rsuggests:
297 f.write(u"RSUGGESTS = %s\n" % pkginfo.rsuggests)
298 if pkginfo.rreplaces:
299 f.write(u"RREPLACES = %s\n" % pkginfo.rreplaces)
300 if pkginfo.rconflicts:
301 f.write(u"RCONFLICTS = %s\n" % pkginfo.rconflicts)
302 f.write(u"PKGSIZE = %d\n" % pkginfo.size)
303 f.write(u"FILES = %s\n" % pkginfo.files)
304 f.write(u"FILELIST = %s\n" % pkginfo.filelist)
305
306 for filevar in pkginfo.filevars:
307 filevarpath = os.path.join(pkgpath, "latest.%s" % filevar)
308 val = pkginfo.filevars[filevar]
309 if val:
310 with codecs.open(filevarpath, "w", encoding='utf8') as f:
311 f.write(val)
312 else:
313 if os.path.exists(filevarpath):
314 os.unlink(filevarpath)
315
316#
317# rootfs_type can be: image, sdk_target, sdk_host
318#
319def buildhistory_list_installed(d, rootfs_type="image"):
320 from oe.rootfs import image_list_installed_packages
321 from oe.sdk import sdk_list_installed_packages
322
323 process_list = [('file', 'bh_installed_pkgs.txt'),\
324 ('deps', 'bh_installed_pkgs_deps.txt')]
325
326 for output_type, output_file in process_list:
327 output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
328
329 with open(output_file_full, 'w') as output:
330 if rootfs_type == "image":
331 output.write(image_list_installed_packages(d, output_type))
332 else:
333 output.write(sdk_list_installed_packages(d, rootfs_type == "sdk_target", output_type))
334
335python buildhistory_list_installed_image() {
336 buildhistory_list_installed(d)
337}
338
339python buildhistory_list_installed_sdk_target() {
340 buildhistory_list_installed(d, "sdk_target")
341}
342
343python buildhistory_list_installed_sdk_host() {
344 buildhistory_list_installed(d, "sdk_host")
345}
346
347buildhistory_get_installed() {
348 mkdir -p $1
349
350 # Get list of installed packages
351 pkgcache="$1/installed-packages.tmp"
352 cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
353
354 cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
355 if [ -s $pkgcache ] ; then
356 cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
357 else
358 printf "" > $1/installed-packages.txt
359 fi
360
361 # Produce dependency graph
362 # First, quote each name to handle characters that cause issues for dot
363 sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp && \
364 rm ${WORKDIR}/bh_installed_pkgs_deps.txt
365 # Change delimiter from pipe to -> and set style for recommend lines
366 sed -i -e 's:|: -> :' -e 's:"\[REC\]":[style=dotted]:' -e 's:$:;:' $1/depends.tmp
367 # Add header, sorted and de-duped contents and footer and then delete the temp file
368 printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
369 cat $1/depends.tmp | sort | uniq >> $1/depends.dot
370 echo "}" >> $1/depends.dot
371 rm $1/depends.tmp
372
373 # Produce installed package sizes list
374 printf "" > $1/installed-package-sizes.tmp
375 cat $pkgcache | while read pkg pkgfile pkgarch
376 do
377 size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
378 if [ "$size" != "" ] ; then
379 echo "$size $pkg" >> $1/installed-package-sizes.tmp
380 fi
381 done
382 cat $1/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > $1/installed-package-sizes.txt
383 rm $1/installed-package-sizes.tmp
384
385 # We're now done with the cache, delete it
386 rm $pkgcache
387
388 if [ "$2" != "sdk" ] ; then
389 # Produce some cut-down graphs (for readability)
390 grep -v kernel_image $1/depends.dot | grep -v kernel-2 | grep -v kernel-3 > $1/depends-nokernel.dot
391 grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
392 grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
393 grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
394 fi
395
396 # add complementary package information
397 if [ -e ${WORKDIR}/complementary_pkgs.txt ]; then
398 cp ${WORKDIR}/complementary_pkgs.txt $1
399 fi
400}
401
402buildhistory_get_image_installed() {
403 # Anything requiring the use of the packaging system should be done in here
404 # in case the packaging files are going to be removed for this image
405
406 if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
407 return
408 fi
409
410 buildhistory_get_installed ${BUILDHISTORY_DIR_IMAGE}
411}
412
413buildhistory_get_sdk_installed() {
414 # Anything requiring the use of the packaging system should be done in here
415 # in case the packaging files are going to be removed for this SDK
416
417 if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
418 return
419 fi
420
421 buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
422}
423
424buildhistory_get_sdk_installed_host() {
425 buildhistory_get_sdk_installed host
426}
427
428buildhistory_get_sdk_installed_target() {
429 buildhistory_get_sdk_installed target
430}
431
432buildhistory_list_files() {
433 # List the files in the specified directory, but exclude date/time etc.
434 # This find/sed pipeline is somewhat messy, but handles the case where the size is not printed for device files under pseudo
435 ( cd $1 && find . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
436}
437
438
439buildhistory_get_imageinfo() {
440 if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
441 return
442 fi
443
444 buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt
445
446 # Collect files requested in BUILDHISTORY_IMAGE_FILES
447 rm -rf ${BUILDHISTORY_DIR_IMAGE}/image-files
448 for f in ${BUILDHISTORY_IMAGE_FILES}; do
449 if [ -f ${IMAGE_ROOTFS}/$f ] ; then
450 mkdir -p ${BUILDHISTORY_DIR_IMAGE}/image-files/`dirname $f`
451 cp ${IMAGE_ROOTFS}/$f ${BUILDHISTORY_DIR_IMAGE}/image-files/$f
452 fi
453 done
454
455 # Record some machine-readable meta-information about the image
456 printf "" > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
457 cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
458${@buildhistory_get_imagevars(d)}
459END
460 imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
461 echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
462
463 # Add some configuration information
464 echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id.txt
465
466 cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id.txt <<END
467${@buildhistory_get_build_id(d)}
468END
469}
470
471buildhistory_get_sdkinfo() {
472 if [ "${@bb.utils.contains('BUILDHISTORY_FEATURES', 'sdk', '1', '0', d)}" = "0" ] ; then
473 return
474 fi
475
476 buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt
477
478 # Record some machine-readable meta-information about the SDK
479 printf "" > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
480 cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
481${@buildhistory_get_sdkvars(d)}
482END
483 sdksize=`du -ks ${SDK_OUTPUT} | awk '{ print $1 }'`
484 echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
485}
486
487# By prepending we get in before the removal of packaging files
488ROOTFS_POSTPROCESS_COMMAND =+ " buildhistory_list_installed_image ;\
489 buildhistory_get_image_installed ; "
490
491IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
492
493# We want these to be the last run so that we get called after complementary package installation
494POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target ;\
495 buildhistory_get_sdk_installed_target ; "
496POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;\
497 buildhistory_get_sdk_installed_host ; "
498
499SDK_POSTPROCESS_COMMAND += "buildhistory_get_sdkinfo ; "
500
501def buildhistory_get_build_id(d):
502 if d.getVar('BB_WORKERCONTEXT', True) != '1':
503 return ""
504 localdata = bb.data.createCopy(d)
505 bb.data.update_data(localdata)
506 statuslines = []
507 for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
508 g = globals()
509 if func not in g:
510 bb.warn("Build configuration function '%s' does not exist" % func)
511 else:
512 flines = g[func](localdata)
513 if flines:
514 statuslines.extend(flines)
515
516 statusheader = d.getVar('BUILDCFG_HEADER', True)
517 return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
518
519def buildhistory_get_metadata_revs(d):
520 # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
521 layers = (d.getVar("BBLAYERS", True) or "").split()
522 metadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
523 base_get_metadata_git_branch(i, None).strip(), \
524 base_get_metadata_git_revision(i, None)) \
525 for i in layers]
526 return '\n'.join(metadata_revs)
527
528
529def squashspaces(string):
530 import re
531 return re.sub("\s+", " ", string).strip()
532
533def outputvars(vars, listvars, d):
534 vars = vars.split()
535 listvars = listvars.split()
536 ret = ""
537 for var in vars:
538 value = d.getVar(var, True) or ""
539 if var in listvars:
540 # Squash out spaces
541 value = squashspaces(value)
542 ret += "%s = %s\n" % (var, value)
543 return ret.rstrip('\n')
544
545def buildhistory_get_imagevars(d):
546 if d.getVar('BB_WORKERCONTEXT', True) != '1':
547 return ""
548 imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
549 listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
550 return outputvars(imagevars, listvars, d)
551
552def buildhistory_get_sdkvars(d):
553 if d.getVar('BB_WORKERCONTEXT', True) != '1':
554 return ""
555 sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
556 listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
557 return outputvars(sdkvars, listvars, d)
558
559
560def buildhistory_get_cmdline(d):
561 if sys.argv[0].endswith('bin/bitbake'):
562 bincmd = 'bitbake'
563 else:
564 bincmd = sys.argv[0]
565 return '%s %s' % (bincmd, ' '.join(sys.argv[1:]))
566
567
568buildhistory_commit() {
569 if [ ! -d ${BUILDHISTORY_DIR} ] ; then
570 # The code above that creates this dir was never executed, so there can't be anything to commit
571 return
572 fi
573
574 # Create a machine-readable list of metadata revisions for each layer
575 cat > ${BUILDHISTORY_DIR}/metadata-revs <<END
576${@buildhistory_get_metadata_revs(d)}
577END
578
579 ( cd ${BUILDHISTORY_DIR}/
580 # Initialise the repo if necessary
581 if [ ! -d .git ] ; then
582 git init -q
583 else
584 git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
585 git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
586 git tag -f build-minus-1 > /dev/null 2>&1 || true
587 fi
588 # Check if there are new/changed files to commit (other than metadata-revs)
589 repostatus=`git status --porcelain | grep -v " metadata-revs$"`
590 HOSTNAME=`hostname 2>/dev/null || echo unknown`
591 CMDLINE="${@buildhistory_get_cmdline(d)}"
592 if [ "$repostatus" != "" ] ; then
593 git add -A .
594 # porcelain output looks like "?? packages/foo/bar"
595 # Ensure we commit metadata-revs with the first commit
596 for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
597 git commit $entry metadata-revs -m "$entry: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
598 done
599 git gc --auto --quiet
600 if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
601 git push -q ${BUILDHISTORY_PUSH_REPO}
602 fi
603 else
604 git commit ${BUILDHISTORY_DIR}/ --allow-empty -m "No changes: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" -m "cmd: $CMDLINE" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
605 fi) || true
606}
607
608python buildhistory_eventhandler() {
609 if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
610 if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
611 bb.note("Writing buildhistory")
612 bb.build.exec_func("buildhistory_commit", e.data)
613}
614
615addhandler buildhistory_eventhandler
616buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted"
617
618
619# FIXME this ought to be moved into the fetcher
620def _get_srcrev_values(d):
621 """
622 Return the version strings for the current recipe
623 """
624
625 scms = []
626 fetcher = bb.fetch.Fetch(d.getVar('SRC_URI', True).split(), d)
627 urldata = fetcher.ud
628 for u in urldata:
629 if urldata[u].method.supports_srcrev():
630 scms.append(u)
631
632 autoinc_templ = 'AUTOINC+'
633 dict_srcrevs = {}
634 dict_tag_srcrevs = {}
635 for scm in scms:
636 ud = urldata[scm]
637 for name in ud.names:
638 try:
639 rev = ud.method.sortable_revision(ud, d, name)
640 except TypeError:
641 # support old bitbake versions
642 rev = ud.method.sortable_revision(scm, ud, d, name)
643 # Clean this up when we next bump bitbake version
644 if type(rev) != str:
645 autoinc, rev = rev
646 elif rev.startswith(autoinc_templ):
647 rev = rev[len(autoinc_templ):]
648 dict_srcrevs[name] = rev
649 if 'tag' in ud.parm:
650 tag = ud.parm['tag']
651 key = name+'_'+tag
652 dict_tag_srcrevs[key] = rev
653 return (dict_srcrevs, dict_tag_srcrevs)
654
655do_fetch[postfuncs] += "write_srcrev"
656do_fetch[vardepsexclude] += "write_srcrev"
657python write_srcrev() {
658 pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
659 srcrevfile = os.path.join(pkghistdir, 'latest_srcrev')
660
661 srcrevs, tag_srcrevs = _get_srcrev_values(d)
662 if srcrevs:
663 if not os.path.exists(pkghistdir):
664 bb.utils.mkdirhier(pkghistdir)
665 old_tag_srcrevs = {}
666 if os.path.exists(srcrevfile):
667 with open(srcrevfile) as f:
668 for line in f:
669 if line.startswith('# tag_'):
670 key, value = line.split("=", 1)
671 key = key.replace('# tag_', '').strip()
672 value = value.replace('"', '').strip()
673 old_tag_srcrevs[key] = value
674 with open(srcrevfile, 'w') as f:
675 orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
676 if orig_srcrev != 'INVALID':
677 f.write('# SRCREV = "%s"\n' % orig_srcrev)
678 if len(srcrevs) > 1:
679 for name, srcrev in srcrevs.items():
680 orig_srcrev = d.getVar('SRCREV_%s' % name, False)
681 if orig_srcrev:
682 f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
683 f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
684 else:
685 f.write('SRCREV = "%s"\n' % srcrevs.itervalues().next())
686 if len(tag_srcrevs) > 0:
687 for name, srcrev in tag_srcrevs.items():
688 f.write('# tag_%s = "%s"\n' % (name, srcrev))
689 if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
690 pkg = d.getVar('PN', True)
691 bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
692
693 else:
694 if os.path.exists(srcrevfile):
695 os.remove(srcrevfile)
696}
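
A hedged local.conf sketch for enabling this class (the push repository URL is a placeholder):

    INHERIT += "buildhistory"
    BUILDHISTORY_COMMIT = "1"
    # Optional: mirror the history to a remote repository after each build
    BUILDHISTORY_PUSH_REPO = "ssh://git@example.com/buildhistory.git"
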
diff --git a/meta/classes/buildstats-summary.bbclass b/meta/classes/buildstats-summary.bbclass
new file mode 100644
index 0000000000..c8fbb2f1a1
--- /dev/null
+++ b/meta/classes/buildstats-summary.bbclass
@@ -0,0 +1,39 @@
1# Summarize sstate usage at the end of the build
2python buildstats_summary () {
3 if not isinstance(e, bb.event.BuildCompleted):
4 return
5
6 import collections
7 import os.path
8
9 bn = get_bn(e)
10 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
11 if not os.path.exists(bsdir):
12 return
13
14 sstatetasks = (e.data.getVar('SSTATETASKS', True) or '').split()
15 built = collections.defaultdict(lambda: [set(), set()])
16 for pf in os.listdir(bsdir):
17 taskdir = os.path.join(bsdir, pf)
18 if not os.path.isdir(taskdir):
19 continue
20
21 tasks = os.listdir(taskdir)
22 for t in sstatetasks:
23 no_sstate, sstate = built[t]
24 if t in tasks:
25 no_sstate.add(pf)
26 elif t + '_setscene' in tasks:
27 sstate.add(pf)
28
29 header_printed = False
30 for t in sstatetasks:
31 no_sstate, sstate = built[t]
32 if no_sstate | sstate:
33 if not header_printed:
34 header_printed = True
35 bb.note("Build completion summary:")
36
37 bb.note(" {0}: {1}% sstate reuse ({2} setscene, {3} scratch)".format(t, 100*len(sstate)/(len(sstate)+len(no_sstate)), len(sstate), len(no_sstate)))
38}
39addhandler buildstats_summary
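
With hypothetical numbers, the summary emitted by the handler above would read:

    Build completion summary:
      do_populate_sysroot: 83% sstate reuse (25 setscene, 5 scratch)
      do_package_write_rpm: 100% sstate reuse (30 setscene, 0 scratch)
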
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
new file mode 100644
index 0000000000..89ae72c679
--- /dev/null
+++ b/meta/classes/buildstats.bbclass
@@ -0,0 +1,289 @@
1BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
2BNFILE = "${BUILDSTATS_BASE}/.buildname"
3DEVFILE = "${BUILDSTATS_BASE}/.device"
4
5################################################################################
6# Build statistics gathering.
7#
8# The CPU and Time gathering/tracking functions and bbevent inspiration
9# were written by Christopher Larson and can be seen here:
10# http://kergoth.pastey.net/142813
11#
12################################################################################
13
14def get_process_cputime(pid):
15 with open("/proc/%d/stat" % pid, "r") as f:
16 fields = f.readline().rstrip().split()
17 # 13: utime, 14: stime, 15: cutime, 16: cstime
18 return sum(int(field) for field in fields[13:17])
19
20def get_cputime():
21 with open("/proc/stat", "r") as f:
22 fields = f.readline().rstrip().split()[1:]
23 return sum(int(field) for field in fields)
24
25def set_bn(e):
26 bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
27 try:
28 os.remove(e.data.getVar('BNFILE', True))
29 except:
30 pass
31 with open(e.data.getVar('BNFILE', True), "w") as f:
32 f.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
33
34def get_bn(e):
35 with open(e.data.getVar('BNFILE', True)) as f:
36 bn = f.readline()
37 return bn
38
39def set_device(e):
40 tmpdir = e.data.getVar('TMPDIR', True)
41 try:
42 os.remove(e.data.getVar('DEVFILE', True))
43 except:
44 pass
45 ############################################################################
46 # We look for the volume TMPDIR lives on. To do all disks would make little
47 # sense and not give us any particularly useful data. In theory we could do
48 # something like stick DL_DIR on a different partition and this would
49 # throw stats gathering off. The same goes for SSTATE_DIR. However, let's
50 # get the basics in here and work on the corner cases later.
51 # A note: /proc/diskstats does not contain info on eCryptfs, tmpfs, etc.
52 # If we end up hitting one of these fs, we'll just skip diskstats collection.
53 ############################################################################
54 device=os.stat(tmpdir)
55 majordev=os.major(device.st_dev)
56 minordev=os.minor(device.st_dev)
57 ############################################################################
58 # Bug 1700:
59 # Because tmpfs/eCryptfs/ramfs etc. insert no entry in /proc/diskstats
60 # we set rdev to NoLogicalDevice and search for it later. If we find NLD
61 # we do not collect diskstats as the method to collect meaningful statistics
62 # for these fs types requires a bit more research.
63 ############################################################################
64 rdev="NoLogicalDevice"
65 try:
66 with open("/proc/diskstats", "r") as f:
67 for line in f:
68 if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
69 rdev=line.split()[2]
70 except:
71 pass
72 file = open(e.data.getVar('DEVFILE', True), "w")
73 file.write(rdev)
74 file.close()
75
76def get_device(e):
77 file = open(e.data.getVar('DEVFILE', True))
78 device = file.readline()
79 file.close()
80 return device
81
82def get_diskstats(dev):
83 import itertools
84 ############################################################################
85 # For info on what these are, see kernel doc file iostats.txt
86 ############################################################################
87 DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
88 try:
89 with open("/proc/diskstats", "r") as f:
90 for x in f:
91 if dev in x:
92 diskstats_val = x.rstrip().split()[4:]
93 except IOError as e:
94 return
95 diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val))
96 return diskstats
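For reference, a sketch of how one /proc/diskstats line decomposes under the parsing above (sample values hypothetical; field layout per the kernel's iostats.txt):

    line = "   8       0 sda 8043 1162 292 3168 578 777 1201 789 0 634 923"
    fields = line.rstrip().split()
    major, minor, name = int(fields[0]), int(fields[1]), fields[2]
    stats = fields[3:]                   # the eleven counters, ReadsComp first
    assert len(stats) == len(DSTAT_KEYS)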
97
98def set_diskdata(var, dev, data):
99 data.setVar(var, get_diskstats(dev))
100
101def get_diskdata(var, dev, data):
102 olddiskdata = data.getVar(var, False)
103 diskdata = {}
104 if olddiskdata is None:
105 return
106 newdiskdata = get_diskstats(dev)
107 for key in olddiskdata.iterkeys():
108 diskdata["Start"+key] = str(int(olddiskdata[key]))
109 diskdata["End"+key] = str(int(newdiskdata[key]))
110 return diskdata
111
112def set_timedata(var, data, server_time=None):
113 import time
114 if server_time:
115 timestamp = server_time
116 else:
117 timestamp = time.time()
118 cputime = get_cputime()
119 proctime = get_process_cputime(os.getpid())
120 data.setVar(var, (timestamp, cputime, proctime))
121
122def get_timedata(var, data, server_time=None):
123 import time
124 timedata = data.getVar(var, False)
125 if timedata is None:
126 return
127 oldtime, oldcpu, oldproc = timedata
128 procdiff = get_process_cputime(os.getpid()) - oldproc
129 cpudiff = get_cputime() - oldcpu
130 if server_time:
131 end_time = server_time
132 else:
133 end_time = time.time()
134 timediff = end_time - oldtime
135 if cpudiff > 0:
136 cpuperc = float(procdiff) * 100 / cpudiff
137 else:
138 cpuperc = None
139 return timediff, cpuperc
140
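As a minimal sketch of how the set_timedata/get_timedata pair above is meant to be used together (a plain dict stands in for the bitbake datastore here):

    import os, time

    samples = {}

    def start_sample(key):
        # like set_timedata: wall clock plus both jiffy counters
        samples[key] = (time.time(), get_cputime(), get_process_cputime(os.getpid()))

    def end_sample(key):
        # like get_timedata: elapsed wall time and this process's share
        # of all CPU jiffies spent over the interval
        oldtime, oldcpu, oldproc = samples[key]
        cpudiff = get_cputime() - oldcpu
        procdiff = get_process_cputime(os.getpid()) - oldproc
        perc = float(procdiff) * 100 / cpudiff if cpudiff > 0 else None
        return time.time() - oldtime, perc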
141def write_task_data(status, logfile, dev, e):
142 bn = get_bn(e)
143 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
144 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
145 file = open(logfile, "a")
146 timedata = get_timedata("__timedata_task", e.data, e.time)
147 if timedata:
148 elapsedtime, cpu = timedata
149 file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
150 (e.task, elapsedtime), e.data))
151 if cpu:
152 file.write("CPU usage: %0.1f%% \n" % cpu)
153 ############################################################################
154 # Here we gather up disk data. In an effort to avoid lying with stats,
155 # we do a bare minimum of analysis on the collected data.
156 # The simple fact is, doing disk I/O collection on a per-process basis
157 # without affecting build time would be difficult.
158 # For the best information, running things with BB_NUMBER_THREADS = "1"
159 # would return accurate per-task results.
160 ############################################################################
161 if dev != "NoLogicalDevice":
162 diskdata = get_diskdata("__diskdata_task", dev, e.data)
163 if diskdata:
164 for key in sorted(diskdata.iterkeys()):
165 file.write(key + ": " + diskdata[key] + "\n")
166 if status == "passed":
167 file.write("Status: PASSED \n")
168 else:
169 file.write("Status: FAILED \n")
170 file.write("Ended: %0.2f \n" % e.time)
171 file.close()
172
173python run_buildstats () {
174 import bb.build
175 import bb.event
176 import bb.data
177 import time, subprocess, platform
178
179 if isinstance(e, bb.event.BuildStarted):
180 ########################################################################
181 # on the first pass, make the buildstats hierarchy and then
182 # set the buildname
183 ########################################################################
184 try:
185 bb.utils.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
186 except:
187 pass
188 set_bn(e)
189 bn = get_bn(e)
190 set_device(e)
191 device = get_device(e)
192
193 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
194 try:
195 bb.utils.mkdirhier(bsdir)
196 except:
197 pass
198 if device != "NoLogicalDevice":
199 set_diskdata("__diskdata_build", device, e.data)
200 set_timedata("__timedata_build", e.data)
201 build_time = os.path.join(bsdir, "build_stats")
202 # write start of build into build_time
203 file = open(build_time,"a")
204 host_info = platform.uname()
205 file.write("Host Info: ")
206 for x in host_info:
207 if x:
208 file.write(x + " ")
209 file.write("\n")
210 file.write("Build Started: %0.2f \n" % time.time())
211 file.close()
212
213 elif isinstance(e, bb.event.BuildCompleted):
214 bn = get_bn(e)
215 device = get_device(e)
216 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
217 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
218 build_time = os.path.join(bsdir, "build_stats")
219 file = open(build_time, "a")
220 ########################################################################
221 # Write build statistics for the build
222 ########################################################################
223 timedata = get_timedata("__timedata_build", e.data)
224 if timedata:
225 time, cpu = timedata
226 # write end of build and cpu used into build_time
227 file.write("Elapsed time: %0.2f seconds \n" % (time))
228 if cpu:
229 file.write("CPU usage: %0.1f%% \n" % cpu)
230 if device != "NoLogicalDevice":
231 diskio = get_diskdata("__diskdata_build", device, e.data)
232 if diskio:
233 for key in sorted(diskio.iterkeys()):
234 file.write(key + ": " + diskio[key] + "\n")
235 file.close()
236
237 if isinstance(e, bb.build.TaskStarted):
238 bn = get_bn(e)
239 device = get_device(e)
240 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
241 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
242 if device != "NoLogicalDevice":
243 set_diskdata("__diskdata_task", device, e.data)
244 set_timedata("__timedata_task", e.data, e.time)
245 try:
246 bb.utils.mkdirhier(taskdir)
247 except:
248 pass
249 # write into the task event file the name and start time
250 file = open(os.path.join(taskdir, e.task), "a")
251 file.write("Event: %s \n" % bb.event.getName(e))
252 file.write("Started: %0.2f \n" % e.time)
253 file.close()
254
255 elif isinstance(e, bb.build.TaskSucceeded):
256 bn = get_bn(e)
257 device = get_device(e)
258 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
259 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
260 write_task_data("passed", os.path.join(taskdir, e.task), device, e)
261 if e.task == "do_rootfs":
262 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
263 bs=os.path.join(bsdir, "build_stats")
264 file = open(bs,"a")
265 rootfs = e.data.getVar('IMAGE_ROOTFS', True)
266 rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
267 file.write("Uncompressed Rootfs size: %s" % rootfs_size)
268 file.close()
269
270 elif isinstance(e, bb.build.TaskFailed):
271 bn = get_bn(e)
272 device = get_device(e)
273 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
274 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
275 write_task_data("failed", os.path.join(taskdir, e.task), device, e)
276 ########################################################################
277 # Let's make things easier and tell people where the build failed in
278 # build_status. We do this here because BuildCompleted triggers no
279 # matter what the status of the build actually is.
280 ########################################################################
281 build_status = os.path.join(bsdir, "build_stats")
282 file = open(build_status,"a")
283 file.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task))
284 file.close()
285}
286
287addhandler run_buildstats
288run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
289
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
new file mode 100644
index 0000000000..2cdce46932
--- /dev/null
+++ b/meta/classes/ccache.bbclass
@@ -0,0 +1,8 @@
1CCACHE = "${@bb.utils.which(d.getVar('PATH', True), 'ccache') and 'ccache '}"
2export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_HOST_SYS}/${PN}"
3CCACHE_DISABLE[unexport] = "1"
4
5do_configure[dirs] =+ "${CCACHE_DIR}"
6do_kernel_configme[dirs] =+ "${CCACHE_DIR}"
7
8do_clean[cleandirs] += "${CCACHE_DIR}"
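The CCACHE assignment above leans on the '${@...}' inline-Python idiom: 'x and y' evaluates to '' when x is empty, so the "ccache " prefix only appears when a ccache binary is found. A standalone sketch of the same trick (which_on_path is a simplified stand-in for bb.utils.which):

    import os

    def which_on_path(path, item):
        # first match on the PATH-style string, else ''
        for p in path.split(":"):
            candidate = os.path.join(p, item)
            if os.path.exists(candidate):
                return candidate
        return ""

    prefix = which_on_path(os.environ.get("PATH", ""), "ccache") and "ccache "
    print(repr(prefix))   # 'ccache ' if found, '' otherwise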
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
new file mode 100644
index 0000000000..77b19372ba
--- /dev/null
+++ b/meta/classes/chrpath.bbclass
@@ -0,0 +1,115 @@
1CHRPATH_BIN ?= "chrpath"
2PREPROCESS_RELOCATE_DIRS ?= ""
3
4def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
5 import subprocess as sub
6
7 p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
8 out, err = p.communicate()
9 # If the command returned successfully, process its stdout for results
10 if p.returncode != 0:
11 return
12
13 # Throw away everything other than the rpath list
14 curr_rpath = out.partition("RPATH=")[2]
15 #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
16 rpaths = curr_rpath.split(":")
17 new_rpaths = []
18 modified = False
19 for rpath in rpaths:
20 # If rpath is already dynamic copy it to new_rpath and continue
21 if rpath.find("$ORIGIN") != -1:
22 new_rpaths.append(rpath.strip())
23 continue
24 rpath = os.path.normpath(rpath)
25 if baseprefix not in rpath and tmpdir not in rpath:
26 new_rpaths.append(rpath.strip())
27 continue
28 new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath.strip(), os.path.dirname(fpath.replace(rootdir, "/"))))
29 modified = True
30
31 # if we have modified some rpaths call chrpath to update the binary
32 if modified:
33 args = ":".join(new_rpaths)
34 #bb.note("Setting rpath for %s to %s" %(fpath, args))
35 p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
36 out, err = p.communicate()
37 if p.returncode != 0:
38 bb.error("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN', True), p.returncode, out, err))
39 raise bb.build.FuncFailed
40
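To make the rpath rewrite above concrete, a simplified sketch of the $ORIGIN computation (paths hypothetical, rootdir already mapped to '/'):

    import os

    origin_dir = "/usr/bin"    # dirname of the binary in its final location
    libdir = "/usr/lib"        # where the rpath points after relocation
    print("$ORIGIN/" + os.path.relpath(libdir, origin_dir))
    # -> $ORIGIN/../lib, i.e. the binary finds its libraries relative to itself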
41def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
42 import subprocess as sub
43
44 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
45 out, err = p.communicate()
46 # If the command returned successfully, process its stdout for results
47 if p.returncode != 0:
48 return
49 for l in out.split("\n"):
50 if "(compatibility" not in l:
51 continue
52 rpath = l.partition("(compatibility")[0].strip()
53 if baseprefix not in rpath:
54 continue
55
56 newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
57 p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
58 out, err = p.communicate()
59
60def process_dir (rootdir, directory, d):
61 import stat
62
63 rootdir = os.path.normpath(rootdir)
64 cmd = d.expand('${CHRPATH_BIN}')
65 tmpdir = os.path.normpath(d.getVar('TMPDIR'))
66 baseprefix = os.path.normpath(d.expand('${base_prefix}'))
67 hostos = d.getVar("HOST_OS", True)
68
69 #bb.debug("Checking %s for binaries to process" % directory)
70 if not os.path.exists(directory):
71 return
72
73 if "linux" in hostos:
74 process_file = process_file_linux
75 elif "darwin" in hostos:
76 process_file = process_file_darwin
77 else:
78 # Relocations not supported
79 return
80
81 dirs = os.listdir(directory)
82 for file in dirs:
83 fpath = directory + "/" + file
84 fpath = os.path.normpath(fpath)
85 if os.path.islink(fpath):
86 # Skip symlinks
87 continue
88
89 if os.path.isdir(fpath):
90 process_dir(rootdir, fpath, d)
91 else:
92 #bb.note("Testing %s for relocatability" % fpath)
93
94 # We need read and write permissions for chrpath; if we don't have
95 # them, set them temporarily. Take a copy of the file's
96 # permissions so that we can restore them afterwards.
97 perms = os.stat(fpath)[stat.ST_MODE]
98 if os.access(fpath, os.W_OK|os.R_OK):
99 perms = None
100 else:
101 # Temporarily make the file writeable so we can chrpath it
102 os.chmod(fpath, perms|stat.S_IRWXU)
103 process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
104
105 if perms:
106 os.chmod(fpath, perms)
107
108def rpath_replace (path, d):
109 bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
110
111 for bindir in bindirs:
112 #bb.note ("Processing directory " + bindir)
113 directory = path + "/" + bindir
114 process_dir (path, directory, d)
115
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
new file mode 100644
index 0000000000..167407dfdc
--- /dev/null
+++ b/meta/classes/clutter.bbclass
@@ -0,0 +1,22 @@
1
2def get_minor_dir(v):
3 import re
4 m = re.match("^([0-9]+)\.([0-9]+)", v)
5 return "%s.%s" % (m.group(1), m.group(2))
6
7def get_real_name(n):
8 import re
9 m = re.match("^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
10 return "%s" % (m.group(1))
11
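A quick sanity check of the two helpers above (inputs hypothetical):

    print(get_minor_dir("1.26.0"))           # -> 1.26
    print(get_real_name("clutter"))          # -> clutter
    print(get_real_name("clutter-gst-3.0"))  # -> clutter-gst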
12VERMINOR = "${@get_minor_dir("${PV}")}"
13REALNAME = "${@get_real_name("${BPN}")}"
14
15CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
16
17CLUTTER_SRC_GIT = "git://git.gnome.org/${REALNAME}"
18
19SRC_URI = "${CLUTTER_SRC_FTP}"
20S = "${WORKDIR}/${REALNAME}-${PV}"
21
22inherit autotools pkgconfig gtk-doc gettext
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
new file mode 100644
index 0000000000..995ddf1ea2
--- /dev/null
+++ b/meta/classes/cmake.bbclass
@@ -0,0 +1,121 @@
1# Path to the CMake file to process.
2OECMAKE_SOURCEPATH ?= "${S}"
3
4DEPENDS_prepend = "cmake-native "
5B = "${WORKDIR}/build"
6
7# We need to unset CCACHE, otherwise cmake gets too confused
8CCACHE = ""
9
10# We want the staging and installing functions from autotools
11inherit autotools
12
13# C/C++ Compiler (without cpu arch/tune arguments)
14OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
15OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
16
17# Compiler flags
18OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
19OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
20OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG"
21OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
22OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
23OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
24
25OECMAKE_RPATH ?= ""
26OECMAKE_PERLNATIVE_DIR ??= ""
27OECMAKE_EXTRA_ROOT_PATH ?= ""
28
29cmake_do_generate_toolchain_file() {
30 cat > ${WORKDIR}/toolchain.cmake <<EOF
31# CMake system name must be something like "Linux".
32# This is important for cross-compiling.
33set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
34set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
35set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
36set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
37set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
38set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
39set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
40set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
41set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
42set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
43set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "ASM FLAGS for release" )
44set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
45set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
46
47 # only search in the paths provided so cmake doesn't pick
48 # up libraries and tools from the native build machine
49set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
50set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
51set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
52set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
53set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
54
55# Use qt.conf settings
56set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
57
58# We need to set the rpath to the correct directory as cmake does not provide any
59# directory as rpath by default
60set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
61
62# Use native cmake modules
63set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
64
65# add for non /usr/lib libdir, e.g. /usr/lib64
66set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
67
68EOF
69}
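The CMAKE_SYSTEM_NAME line above capitalises the first letter of TARGET_OS and truncates anything after a leading "Linux"; a Python rendering of that sed transform (sample value hypothetical):

    target_os = "linux-gnueabi"
    name = target_os[:1].upper() + target_os[1:]
    if name.startswith("Linux"):
        name = "Linux"
    print(name)   # -> Linux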
70
71addtask generate_toolchain_file after do_patch before do_configure
72
73cmake_do_configure() {
74 if [ "${OECMAKE_BUILDPATH}" ]; then
75 bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
76 fi
77
78 if [ "${S}" != "${B}" ]; then
79 rm -rf ${B}
80 mkdir -p ${B}
81 cd ${B}
82 fi
83
84 # Just like autotools, cmake can use a site file to cache results that would otherwise need generated binaries to run
85 if [ -e ${WORKDIR}/site-file.cmake ] ; then
86 OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
87 else
88 OECMAKE_SITEFILE=""
89 fi
90
91 cmake \
92 ${OECMAKE_SITEFILE} \
93 ${OECMAKE_SOURCEPATH} \
94 -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
95 -DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
96 -DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
97 -DCMAKE_INSTALL_LIBEXECDIR:PATH=${libexecdir} \
98 -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
99 -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${sharedstatedir} \
100 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
101 -DCMAKE_INSTALL_LIBDIR:PATH=${libdir} \
102 -DCMAKE_INSTALL_INCLUDEDIR:PATH=${includedir} \
103 -DCMAKE_INSTALL_DATAROOTDIR:PATH=${datadir} \
104 -DCMAKE_INSTALL_SO_NO_EXE=0 \
105 -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
106 -DCMAKE_VERBOSE_MAKEFILE=1 \
107 ${EXTRA_OECMAKE} \
108 -Wno-dev
109}
110
111cmake_do_compile() {
112 cd ${B}
113 base_do_compile
114}
115
116cmake_do_install() {
117 cd ${B}
118 autotools_do_install
119}
120
121EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
new file mode 100644
index 0000000000..43acfd531b
--- /dev/null
+++ b/meta/classes/cml1.bbclass
@@ -0,0 +1,74 @@
1cml1_do_configure() {
2 set -e
3 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
4 oe_runmake oldconfig
5}
6
7EXPORT_FUNCTIONS do_configure
8addtask configure after do_unpack do_patch before do_compile
9
10inherit terminal
11
12OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
13HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
14HOSTLDFLAGS = "${BUILD_LDFLAGS}"
15CROSS_CURSES_LIB = "-lncurses -ltinfo"
16CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
17TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
18
19python do_menuconfig() {
20 import shutil
21
22 try:
23 mtime = os.path.getmtime(".config")
24 shutil.copy(".config", ".config.orig")
25 except OSError:
26 mtime = 0
27
28 oe_terminal("${SHELL} -c \"make menuconfig; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"", '${PN} Configuration', d)
29
30 # FIXME this check can be removed when the minimum bitbake version has been bumped
31 if hasattr(bb.build, 'write_taint'):
32 try:
33 newmtime = os.path.getmtime(".config")
34 except OSError:
35 newmtime = 0
36
37 if newmtime > mtime:
38 bb.note("Configuration changed, recompile will be forced")
39 bb.build.write_taint('do_compile', d)
40}
41do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
42do_menuconfig[nostamp] = "1"
43addtask menuconfig after do_configure
44
45python do_diffconfig() {
46 import shutil
47 import subprocess
48
49 workdir = d.getVar('WORKDIR', True)
50 fragment = workdir + '/fragment.cfg'
51 configorig = '.config.orig'
52 config = '.config'
53
54 try:
55 md5newconfig = bb.utils.md5_file(configorig)
56 md5config = bb.utils.md5_file(config)
57 isdiff = md5newconfig != md5config
58 except IOError as e:
59 bb.fatal("No config files found. Did you run menuconfig?\n%s" % e)
60
61 if isdiff:
62 statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
63 subprocess.call(statement, shell=True)
64
65 shutil.copy(configorig, config)
66
67 bb.plain("Config fragment has been dumped into:\n %s" % fragment)
68 else:
69 if os.path.exists(fragment):
70 os.unlink(fragment)
71}
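The diff invocation above keeps only lines added or changed in .config. A pure-Python approximation of the same fragment extraction (file paths hypothetical; diff's positional line-by-line semantics differ slightly):

    def config_fragment(orig_path, new_path):
        # keep lines present in the new config but absent from the original
        with open(orig_path) as f:
            orig_lines = set(f.readlines())
        with open(new_path) as f:
            return [line for line in f if line not in orig_lines]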
72
73do_diffconfig[nostamp] = "1"
74addtask diffconfig
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
new file mode 100644
index 0000000000..6edbaf531f
--- /dev/null
+++ b/meta/classes/compress_doc.bbclass
@@ -0,0 +1,256 @@
1# Compress man pages in ${mandir} and info pages in ${infodir}
2#
3# 1. The doc will be compressed to gz format by default.
4#
5 # 2. Docs already compressed in a format that is listed in
6 # ${DOC_COMPRESS_LIST} but differs from ${DOC_COMPRESS} are
7 # automatically recompressed to the ${DOC_COMPRESS} format.
8 #
9 # 3. It is easy to add a new compression type by editing
10 # local.conf, for example:
11# DOC_COMPRESS_LIST_append = ' abc'
12# DOC_COMPRESS = 'abc'
13# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
14# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
15
16# All supported compression policy
17DOC_COMPRESS_LIST ?= "gz xz bz2"
18
19# Compression policy, must be one of ${DOC_COMPRESS_LIST}
20DOC_COMPRESS ?= "gz"
21
22# Compression shell command
23DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
24DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
25DOC_COMPRESS_CMD[xz] ?= "xz -v"
26
27# Decompression shell command
28DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
29DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
30DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
31
32PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
33python package_do_compress_doc() {
34 compress_mode = d.getVar('DOC_COMPRESS', True)
35 compress_list = (d.getVar('DOC_COMPRESS_LIST', True) or '').split()
36 if compress_mode not in compress_list:
37 bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
38
39 dvar = d.getVar('PKGD', True)
40 compress_cmds = {}
41 decompress_cmds = {}
42 for mode in compress_list:
43 compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
44 decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
45
46 mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True))
47 if os.path.exists(mandir):
48 # Decompress doc files whose format is not compress_mode
49 decompress_doc(mandir, compress_mode, decompress_cmds)
50 compress_doc(mandir, compress_mode, compress_cmds)
51
52 infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir", True))
53 if os.path.exists(infodir):
54 # Decompress doc files whose format is not compress_mode
55 decompress_doc(infodir, compress_mode, decompress_cmds)
56 compress_doc(infodir, compress_mode, compress_cmds)
57}
58
59def _get_compress_format(file, compress_format_list):
60 for compress_format in compress_format_list:
61 compress_suffix = '.' + compress_format
62 if file.endswith(compress_suffix):
63 return compress_format
64
65 return ''
66
67 # Collect hardlinks into a dict; each element in the dict lists hardlinks
68 # which point to the same doc file.
69 # {hardlink10: [hardlink11, hardlink12], ...}
70# The hardlink10, hardlink11 and hardlink12 are the same file.
71def _collect_hardlink(hardlink_dict, file):
72 for hardlink in hardlink_dict:
73 # Add to the existing hardlink group
74 if os.path.samefile(hardlink, file):
75 hardlink_dict[hardlink].append(file)
76 return hardlink_dict
77
78 hardlink_dict[file] = []
79 return hardlink_dict
80
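For illustration, grouping by inode gives the same dictionary shape as _collect_hardlink, without the pairwise samefile checks (a sketch, not the class's code):

    import os
    from collections import defaultdict

    def group_hardlinks(paths):
        # map (st_dev, st_ino) -> paths; the first path seen plays the role
        # of the dict key above, the rest are its duplicates
        groups = defaultdict(list)
        for p in paths:
            st = os.stat(p)
            groups[(st.st_dev, st.st_ino)].append(p)
        return {v[0]: v[1:] for v in groups.values()}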
81def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
82 for target in hardlink_dict:
83 if decompress:
84 compress_format = _get_compress_format(target, shell_cmds.keys())
85 cmd = "%s -f %s" % (shell_cmds[compress_format], target)
86 bb.note('decompress hardlink %s' % target)
87 else:
88 cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
89 bb.note('compress hardlink %s' % target)
90 (retval, output) = oe.utils.getstatusoutput(cmd)
91 if retval:
92 bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
93 return
94
95 for hardlink_dup in hardlink_dict[target]:
96 if decompress:
97 # Remove compress suffix
98 compress_suffix = '.' + compress_format
99 new_hardlink = hardlink_dup[:-len(compress_suffix)]
100 new_target = target[:-len(compress_suffix)]
101 else:
102 # Append compress suffix
103 compress_suffix = '.' + compress_mode
104 new_hardlink = hardlink_dup + compress_suffix
105 new_target = target + compress_suffix
106
107 bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
108 if not os.path.exists(new_hardlink):
109 os.link(new_target, new_hardlink)
110 if os.path.exists(hardlink_dup):
111 os.unlink(hardlink_dup)
112
113def _process_symlink(file, compress_format, decompress=False):
114 compress_suffix = '.' + compress_format
115 if decompress:
116 # Remove compress suffix
117 new_linkname = file[:-len(compress_suffix)]
118 new_source = os.readlink(file)[:-len(compress_suffix)]
119 else:
120 # Append compress suffix
121 new_linkname = file + compress_suffix
122 new_source = os.readlink(file) + compress_suffix
123
124 bb.note('symlink %s-->%s' % (new_linkname, new_source))
125 if not os.path.exists(new_linkname):
126 os.symlink(new_source, new_linkname)
127
128 os.unlink(file)
129
130def _is_info(file):
131 flags = '.info .info-'.split()
132 for flag in flags:
133 if flag in os.path.basename(file):
134 return True
135
136 return False
137
138def _is_man(file):
139 # This refers to the MANSECT variable in man(1.6g)'s man.config
140 flags = '.1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o'.split(':')
141 for flag in flags:
142 if os.path.basename(file).endswith(flag):
143 return True
144
145 return False
146
147def _is_compress_doc(file, compress_format_list):
148 compress_format = _get_compress_format(file, compress_format_list)
149 compress_suffix = '.' + compress_format
150 if file.endswith(compress_suffix):
151 # Remove the compress suffix
152 uncompress_file = file[:-len(compress_suffix)]
153 if _is_info(uncompress_file) or _is_man(uncompress_file):
154 return True, compress_format
155
156 return False, ''
157
158def compress_doc(topdir, compress_mode, compress_cmds):
159 hardlink_dict = {}
160 for root, dirs, files in os.walk(topdir):
161 for f in files:
162 file = os.path.join(root, f)
163 if os.path.isdir(file):
164 continue
165
166 if _is_info(file) or _is_man(file):
167 # Symlink
168 if os.path.islink(file):
169 _process_symlink(file, compress_mode)
170 # Hardlink
171 elif os.lstat(file).st_nlink > 1:
172 _collect_hardlink(hardlink_dict, file)
173 # Normal file
174 elif os.path.isfile(file):
175 cmd = "%s %s" % (compress_cmds[compress_mode], file)
176 (retval, output) = oe.utils.getstatusoutput(cmd)
177 if retval:
178 bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
179 continue
180 bb.note('compress file %s' % file)
181
182 _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
183
184 # Decompress doc files whose format is not compress_mode
185def decompress_doc(topdir, compress_mode, decompress_cmds):
186 hardlink_dict = {}
187 decompress = True
188 for root, dirs, files in os.walk(topdir):
189 for f in files:
190 file = os.path.join(root, f)
191 if os.path.isdir(file):
192 continue
193
194 res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
195 # Decompress files whose format is not compress_mode
196 if res and compress_mode!=compress_format:
197 # Symlink
198 if os.path.islink(file):
199 _process_symlink(file, compress_format, decompress)
200 # Hardlink
201 elif os.lstat(file).st_nlink > 1:
202 _collect_hardlink(hardlink_dict, file)
203 # Normal file
204 elif os.path.isfile(file):
205 cmd = "%s %s" % (decompress_cmds[compress_format], file)
206 (retval, output) = oe.utils.getstatusoutput(cmd)
207 if retval:
208 bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
209 continue
210 bb.note('decompress file %s' % file)
211
212 _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
213
214python compress_doc_updatealternatives () {
215 if not bb.data.inherits_class('update-alternatives', d):
216 return
217
218 mandir = d.getVar("mandir", True)
219 infodir = d.getVar("infodir", True)
220 compress_mode = d.getVar('DOC_COMPRESS', True)
221 for pkg in (d.getVar('PACKAGES', True) or "").split():
222 old_names = (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split()
223 new_names = []
224 for old_name in old_names:
225 old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name, True)
226 old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True) or \
227 d.getVarFlag('ALTERNATIVE_TARGET', old_name, True) or \
228 d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or \
229 d.getVar('ALTERNATIVE_TARGET', True) or \
230 old_link
231 # Sometimes old_target is specified as relative to the link name.
232 old_target = os.path.join(os.path.dirname(old_link), old_target)
233
234 # Update the alternatives whose targets live under mandir/infodir (the compressed docs)
235 if mandir in old_target or infodir in old_target:
236 new_name = old_name + '.' + compress_mode
237 new_link = old_link + '.' + compress_mode
238 new_target = old_target + '.' + compress_mode
239 d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
240 d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
241 if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name, True):
242 d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
243 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
244 elif d.getVarFlag('ALTERNATIVE_TARGET', old_name, True):
245 d.delVarFlag('ALTERNATIVE_TARGET', old_name)
246 d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
247 elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True):
248 d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
249 elif d.getVar('ALTERNATIVE_TARGET', True):
250 d.setVar('ALTERNATIVE_TARGET', new_target)
251
252 new_names.append(new_name)
253
254 if new_names:
255 d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
256}
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
new file mode 100644
index 0000000000..907c1836b3
--- /dev/null
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -0,0 +1,64 @@
1# Deploy sources for recipes for compliance with copyleft-style licenses
2# Defaults to using symlinks, as it's a quick operation, and one can easily
3# follow the links when making use of the files (e.g. tar with the -h arg).
4#
5# vi:sts=4:sw=4:et
6
7inherit copyleft_filter
8
9COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
10
11python do_prepare_copyleft_sources () {
12 """Populate a tree of the recipe sources and emit patch series files"""
13 import os.path
14 import shutil
15
16 p = d.getVar('P', True)
17 included, reason = copyleft_should_include(d)
18 if not included:
19 bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
20 return
21 else:
22 bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
23
24 sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
25 dl_dir = d.getVar('DL_DIR', True)
26 src_uri = d.getVar('SRC_URI', True).split()
27 fetch = bb.fetch2.Fetch(src_uri, d)
28 ud = fetch.ud
29
30 pf = d.getVar('PF', True)
31 dest = os.path.join(sources_dir, pf)
32 shutil.rmtree(dest, ignore_errors=True)
33 bb.utils.mkdirhier(dest)
34
35 for u in ud.values():
36 local = os.path.normpath(fetch.localpath(u.url))
37 if local.endswith('.bb'):
38 continue
39 elif local.endswith('/'):
40 local = local[:-1]
41
42 if u.mirrortarball:
43 tarball_path = os.path.join(dl_dir, u.mirrortarball)
44 if os.path.exists(tarball_path):
45 local = tarball_path
46
47 oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
48
49 patches = src_patches(d)
50 for patch in patches:
51 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
52 patchdir = parm.get('patchdir')
53 if patchdir:
54 series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
55 else:
56 series = os.path.join(dest, 'series')
57
58 with open(series, 'a') as s:
59 s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
60}
61
62addtask prepare_copyleft_sources after do_fetch before do_build
63do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
64do_build[recrdeptask] += 'do_prepare_copyleft_sources'
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
new file mode 100644
index 0000000000..2c1d8f1c90
--- /dev/null
+++ b/meta/classes/copyleft_filter.bbclass
@@ -0,0 +1,62 @@
1# Filter on license: copyleft_should_include returns True for recipes
2# whose LICENSE matches COPYLEFT_LICENSE_INCLUDE, and False for those
3# matching COPYLEFT_LICENSE_EXCLUDE.
4#
5# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
6#
7# vi:sts=4:sw=4:et
8
9COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
10COPYLEFT_LICENSE_INCLUDE[type] = 'list'
11COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
12
13COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
14COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
15COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
16
17COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
18COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
19
20COPYLEFT_RECIPE_TYPES ?= 'target'
21COPYLEFT_RECIPE_TYPES[type] = 'list'
22COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
23
24COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
25COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
26COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
27
28def copyleft_recipe_type(d):
29 for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
30 if oe.utils.inherits(d, recipe_type):
31 return recipe_type
32 return 'target'
33
34def copyleft_should_include(d):
35 """
36 Determine if this recipe's sources should be deployed for compliance
37 """
38 import ast
39 import oe.license
40 from fnmatch import fnmatchcase as fnmatch
41
42 recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
43 if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
44 return False, 'recipe type "%s" is excluded' % recipe_type
45
46 include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
47 exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
48
49 try:
50 is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
51 except oe.license.LicenseError as exc:
52 bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
53 else:
54 if is_included:
55 if reason:
56 return True, 'recipe has included licenses: %s' % ', '.join(reason)
57 else:
58 return False, 'recipe does not include a copyleft license'
59 else:
60 return False, 'recipe has excluded licenses: %s' % ', '.join(reason)
61
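A toy version of the glob-based include/exclude test driving the logic above (oe.license.is_included additionally parses '&'/'|' license expressions):

    from fnmatch import fnmatchcase

    def matches(license, globs):
        return any(fnmatchcase(license, g) for g in globs)

    include = ['GPL*', 'LGPL*']
    exclude = ['CLOSED', 'Proprietary']
    lic = "GPLv2"
    print(matches(lic, include) and not matches(lic, exclude))   # -> True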
62
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
new file mode 100644
index 0000000000..62363fb334
--- /dev/null
+++ b/meta/classes/core-image.bbclass
@@ -0,0 +1,80 @@
1# Common code for generating core reference images
2#
3# Copyright (C) 2007-2011 Linux Foundation
4
5LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=4d92cd373abda3937c2bc47fbc49d690 \
6 file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
7
8 # IMAGE_FEATURES controls the content of the core reference images
9 #
10 # By default we install the packagegroup-core-boot and packagegroup-base-extended packages;
11 # this gives us a working (console-only) rootfs.
12#
13# Available IMAGE_FEATURES:
14#
15# - x11 - X server
16# - x11-base - X server with minimal environment
17# - x11-sato - OpenedHand Sato environment
18# - tools-debug - debugging tools
19# - eclipse-debug - Eclipse remote debugging support
20# - tools-profile - profiling tools
21# - tools-testapps - tools usable to make some device tests
22# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
23# - nfs-server - NFS server
24# - ssh-server-dropbear - SSH server (dropbear)
25# - ssh-server-openssh - SSH server (openssh)
26# - qt4-pkgs - Qt4/X11 and demo applications
27# - hwcodecs - Install hardware acceleration codecs
28# - package-management - installs package management tools and preserves the package manager database
29# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
30# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
31# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
32# - doc-pkgs - documentation packages for all installed packages in the rootfs
33# - ptest-pkgs - ptest packages for all ptest-enabled recipes
34# - read-only-rootfs - tweaks an image to support read-only rootfs
35#
36FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
37FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
38FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
39FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
40FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
41FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
42FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
43FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
44FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
45FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
46FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
47FEATURE_PACKAGES_qt4-pkgs = "packagegroup-core-qt-demoapps"
48FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
49
50
51# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
52# Including image feature foo would replace the image features bar1 and bar2
53IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
54
55# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
56 # An error is raised if image feature foo is included together with bar1 (or bar2)
57
58MACHINE_HWCODECS ??= ""
59
60CORE_IMAGE_BASE_INSTALL = '\
61 packagegroup-core-boot \
62 packagegroup-base-extended \
63 \
64 ${CORE_IMAGE_EXTRA_INSTALL} \
65 '
66
67CORE_IMAGE_EXTRA_INSTALL ?= ""
68
69IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
70
71inherit image
72
73# Create /etc/timestamp during image construction to give a reasonably sane default time setting
74ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
75
76# Zap the root password if debug-tweaks feature is not enabled
77ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_empty_root_password ; ",d)}'
78
79# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
80ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
new file mode 100644
index 0000000000..d9817ba6b6
--- /dev/null
+++ b/meta/classes/cpan-base.bbclass
@@ -0,0 +1,55 @@
1#
2 # cpan-base provides various Perl-related information needed for building
3 # CPAN modules
4#
5FILES_${PN} += "${libdir}/perl ${datadir}/perl"
6
7DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
8RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
9
10PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
11
12# Determine the staged version of perl from the perl configuration file
13# Assign vardepvalue, because otherwise signature is changed before and after
14# perl is built (from None to real version in config.sh).
15get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
16def get_perl_version(d):
17 import re
18 cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
19 try:
20 f = open(cfg, 'r')
21 except IOError:
22 return None
23 l = f.readlines()
24 f.close()
25 r = re.compile("^version='(\d*\.\d*\.\d*)'")
26 for s in l:
27 m = r.match(s)
28 if m:
29 return m.group(1)
30 return None
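A sketch of the config.sh line the regex above matches (content hypothetical):

    import re
    r = re.compile("^version='(\d*\.\d*\.\d*)'")
    m = r.match("version='5.20.0'")
    print(m.group(1))   # -> 5.20.0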
31
32# Determine where the library directories are
33def perl_get_libdirs(d):
34 libdir = d.getVar('libdir', True)
35 if is_target(d) == "no":
36 libdir += '/perl-native'
37 libdir += '/perl'
38 return libdir
39
40def is_target(d):
41 if not bb.data.inherits_class('native', d):
42 return "yes"
43 return "no"
44
45PERLLIBDIRS := "${@perl_get_libdirs(d)}"
46PERLVERSION := "${@get_perl_version(d)}"
47PERLVERSION[vardepvalue] = ""
48
49FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
50 ${PERLLIBDIRS}/auto/*/*/.debug \
51 ${PERLLIBDIRS}/auto/*/*/*/.debug \
52 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/.debug \
53 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/.debug \
54 ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/.debug \
55 "
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
new file mode 100644
index 0000000000..e2bbd2f63a
--- /dev/null
+++ b/meta/classes/cpan.bbclass
@@ -0,0 +1,55 @@
1#
2# This is for perl modules that use the old Makefile.PL build system
3#
4inherit cpan-base perlnative
5
6EXTRA_CPANFLAGS ?= ""
7EXTRA_PERLFLAGS ?= ""
8
9# Env var which tells perl if it should use host (no) or target (yes) settings
10export PERLCONFIGTARGET = "${@is_target(d)}"
11
12# Env var which tells perl where the perl include files are
13export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
14export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
15export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
16export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
17
18cpan_do_configure () {
19 export PERL5LIB="${PERL_ARCHLIB}"
20 yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
21
22 # Makefile.PLs can exit with success without generating a
23 # Makefile, e.g. in cases of missing configure time
24 # dependencies. This is considered a best practice by
25 # cpantesters.org. See:
26 # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
27 # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
28 [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
29
30 if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
31 . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
32 # Use find since there can be a Makefile generated for each Makefile.PL
33 for f in `find -name Makefile.PL`; do
34 f2=`echo $f | sed -e 's/.PL//'`
35 test -f $f2 || continue
36 sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
37 -e 's/perl.real/perl/' \
38 -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
39 $f2
40 done
41 fi
42}
43
44cpan_do_compile () {
45 oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
46}
47
48cpan_do_install () {
49 oe_runmake DESTDIR="${D}" install_vendor
50 for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
51 sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
52 done
53}
54
55EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
new file mode 100644
index 0000000000..2eb8162314
--- /dev/null
+++ b/meta/classes/cpan_build.bbclass
@@ -0,0 +1,53 @@
1#
2# This is for perl modules that use the new Build.PL build system
3#
4inherit cpan-base perlnative
5
6EXTRA_CPAN_BUILD_FLAGS ?= ""
7
8# Env var which tells perl if it should use host (no) or target (yes) settings
9export PERLCONFIGTARGET = "${@is_target(d)}"
10export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
11export LD = "${CCLD}"
12
13#
14# We also need to have built libmodule-build-perl-native for
15# everything except libmodule-build-perl-native itself (which uses
16# this class, but uses itself as the provider of
17# libmodule-build-perl)
18#
19def cpan_build_dep_prepend(d):
20 if d.getVar('CPAN_BUILD_DEPS', True):
21 return ''
22 pn = d.getVar('PN', True)
23 if pn in ['libmodule-build-perl', 'libmodule-build-perl-native']:
24 return ''
25 return 'libmodule-build-perl-native '
26
27DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}"
28
29cpan_build_do_configure () {
30 if [ "${@is_target(d)}" = "yes" ]; then
31 # build for target
32 . ${STAGING_LIBDIR}/perl/config.sh
33 fi
34
35 perl Build.PL --installdirs vendor \
36 --destdir ${D} \
37 --install_path arch="${libdir}/perl" \
38 --install_path script=${bindir} \
39 --install_path bin=${bindir} \
40 --install_path bindoc=${mandir}/man1 \
41 --install_path libdoc=${mandir}/man3 \
42 ${EXTRA_CPAN_BUILD_FLAGS}
43}
44
45cpan_build_do_compile () {
46 perl Build
47}
48
49cpan_build_do_install () {
50 perl Build install
51}
52
53EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
new file mode 100644
index 0000000000..a8565e91e3
--- /dev/null
+++ b/meta/classes/cross-canadian.bbclass
@@ -0,0 +1,142 @@
1#
2 # NOTE - When using this class the user is responsible for ensuring that
3# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
4# is changed, another nativesdk xxx-canadian-cross can be installed
5#
6
7
8# SDK packages are built either explicitly by the user,
9# or indirectly via dependency. No need to be in 'world'.
10EXCLUDE_FROM_WORLD = "1"
11CLASSOVERRIDE = "class-cross-canadian"
12STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
13
14#
15# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
16#
17PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
18CANADIANEXTRAOS = ""
19MODIFYTOS ??= "1"
20python () {
21 archs = d.getVar('PACKAGE_ARCHS', True).split()
22 sdkarchs = []
23 for arch in archs:
24 sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
25 d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
26
27 # Allow the following code segment to be disabled, e.g. meta-environment
28 if d.getVar("MODIFYTOS", True) != "1":
29 return
30 # PowerPC can build "linux" and "linux-gnuspe"
31 tarch = d.getVar("TARGET_ARCH", True)
32 if tarch == "powerpc":
33 tos = d.getVar("TARGET_OS", True)
34 if (tos != "linux" and tos != "linux-gnuspe"
35 and tos != "linux-uclibc" and tos != "linux-uclibcspe"
36 and tos != "linux-musl" and tos != "linux-muslspe"):
37 bb.fatal("Building cross-canadian powerpc for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS", True))
38 # This is a bit ugly. We need to zero the LIBC/ABI extension, which will change TARGET_OS;
39 # however, we need the old value in some variables. We expand those here first.
40 d.setVar("DEPENDS", d.getVar("DEPENDS", True))
41 d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN", True))
42 for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
43 n = prefix + "_FOR_TARGET"
44 d.setVar(n, d.getVar(n, True))
45
46 d.setVar("LIBCEXTENSION", "")
47 d.setVar("ABIEXTENSION", "")
48 d.setVar("CANADIANEXTRAOS", "linux-gnuspe")
49}
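The PACKAGE_ARCHS rewrite at the top of the block simply suffixes every arch; a standalone sketch (arch list hypothetical):

    archs = "all any noarch x86_64".split()
    suffixed = [a + "-nativesdk" for a in archs]
    print(" ".join(suffixed))
    # -> all-nativesdk any-nativesdk noarch-nativesdk x86_64-nativesdk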
50MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
51
52INHIBIT_DEFAULT_DEPS = "1"
53
54STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
55
56TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
57
58PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
59PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
60
61HOST_ARCH = "${SDK_ARCH}"
62HOST_VENDOR = "${SDK_VENDOR}"
63HOST_OS = "${SDK_OS}"
64HOST_PREFIX = "${SDK_PREFIX}"
65HOST_CC_ARCH = "${SDK_CC_ARCH}"
66HOST_LD_ARCH = "${SDK_LD_ARCH}"
67HOST_AS_ARCH = "${SDK_AS_ARCH}"
68
69#assign DPKG_ARCH
70DPKG_ARCH = "${SDK_ARCH}"
71
72CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
73CFLAGS = "${BUILDSDK_CFLAGS}"
74CXXFLAGS = "${BUILDSDK_CFLAGS}"
75LDFLAGS = "${BUILDSDK_LDFLAGS} \
76 -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
77 -Wl,-rpath,${libdir}/.. "
78
79DEPENDS_GETTEXT = "gettext-native nativesdk-gettext"
80
81#
82# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
83# binaries
84#
85DEPENDS_append = " chrpath-replacement-native"
86EXTRANATIVEPATH += "chrpath-native"
87
88# Path mangling needed by the cross packaging
89# Note that we use := here to ensure that libdir and includedir are
90# target paths.
91target_base_prefix := "${base_prefix}"
92target_prefix := "${prefix}"
93target_exec_prefix := "${exec_prefix}"
94target_base_libdir = "${target_base_prefix}/${baselib}"
95target_libdir = "${target_exec_prefix}/${baselib}"
96target_includedir := "${includedir}"
97
98# Change to place files in SDKPATH
99base_prefix = "${SDKPATHNATIVE}"
100prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
101exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
102bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
103sbindir = "${bindir}"
104base_bindir = "${bindir}"
105base_sbindir = "${bindir}"
106libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
107libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
108
109FILES_${PN} = "${prefix}"
110FILES_${PN}-dbg += "${prefix}/.debug \
111 ${prefix}/bin/.debug \
112 "
113
114export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
115export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
116
117do_populate_sysroot[stamp-extra-info] = ""
118do_packagedata[stamp-extra-info] = ""
119
120USE_NLS = "${SDKUSE_NLS}"
121
122 # We have to use TARGET_ARCH, but we care about the absolute value
123# and not any particular tune that is enabled.
124TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
125
126# If MLPREFIX is set by multilib code, shlibs
127# points to the wrong place so force it
128SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
129SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
130
131cross_canadian_bindirlinks () {
132 for i in ${CANADIANEXTRAOS}
133 do
134 d=${D}${bindir}/../${TARGET_ARCH}${TARGET_VENDOR}-$i
135 install -d $d
136 for j in `ls ${D}${bindir}`
137 do
138 p=${TARGET_ARCH}${TARGET_VENDOR}-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
139 ln -s ../${TARGET_SYS}/$j $d/$p
140 done
141 done
142}
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
new file mode 100644
index 0000000000..28fd2116c0
--- /dev/null
+++ b/meta/classes/cross.bbclass
@@ -0,0 +1,75 @@
1inherit relocatable
2
3# Cross packages are built indirectly via dependency,
4# no need for them to be a direct target of 'world'
5EXCLUDE_FROM_WORLD = "1"
6
7CLASSOVERRIDE = "class-cross"
8PACKAGES = ""
9PACKAGES_DYNAMIC = ""
10PACKAGES_DYNAMIC_class-native = ""
11
12HOST_ARCH = "${BUILD_ARCH}"
13HOST_VENDOR = "${BUILD_VENDOR}"
14HOST_OS = "${BUILD_OS}"
15HOST_PREFIX = "${BUILD_PREFIX}"
16HOST_CC_ARCH = "${BUILD_CC_ARCH}"
17HOST_LD_ARCH = "${BUILD_LD_ARCH}"
18HOST_AS_ARCH = "${BUILD_AS_ARCH}"
19
20STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
21
22PACKAGE_ARCH = "${BUILD_ARCH}"
23
24export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
25export PKG_CONFIG_SYSROOT_DIR = ""
26
27CPPFLAGS = "${BUILD_CPPFLAGS}"
28CFLAGS = "${BUILD_CFLAGS}"
29CXXFLAGS = "${BUILD_CFLAGS}"
30LDFLAGS = "${BUILD_LDFLAGS}"
31LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
32
33TOOLCHAIN_OPTIONS = ""
34
35DEPENDS_GETTEXT = "gettext-native"
36
37# Path mangling needed by the cross packaging
38# Note that we use := here to ensure that libdir and includedir are
39# target paths.
40target_base_prefix := "${base_prefix}"
41target_prefix := "${prefix}"
42target_exec_prefix := "${exec_prefix}"
43target_base_libdir = "${target_base_prefix}/${baselib}"
44target_libdir = "${target_exec_prefix}/${baselib}"
45target_includedir := "${includedir}"
46
47# Overrides for paths
48CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
49prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
50base_prefix = "${STAGING_DIR_NATIVE}"
51exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
52bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
53sbindir = "${bindir}"
54base_bindir = "${bindir}"
55base_sbindir = "${bindir}"
56libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
57libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
58
59do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
60do_populate_sysroot[stamp-extra-info] = ""
61do_packagedata[stamp-extra-info] = ""
62
63do_install () {
64 oe_runmake 'DESTDIR=${D}' install
65}
66
67USE_NLS = "no"
68
69deltask package
70deltask packagedata
71deltask package_write_ipk
72deltask package_write_deb
73deltask package_write_rpm
74deltask package_write
75
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
new file mode 100644
index 0000000000..87d5cf5d37
--- /dev/null
+++ b/meta/classes/crosssdk.bbclass
@@ -0,0 +1,36 @@
1inherit cross
2
3CLASSOVERRIDE = "class-crosssdk"
4MACHINEOVERRIDES = ""
5PACKAGE_ARCH = "${SDK_ARCH}"
6python () {
7 # set TUNE_PKGARCH to SDK_ARCH
8 d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
9}
10
11STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}"
12STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
13
14TARGET_ARCH = "${SDK_ARCH}"
15TARGET_VENDOR = "${SDK_VENDOR}"
16TARGET_OS = "${SDK_OS}"
17TARGET_PREFIX = "${SDK_PREFIX}"
18TARGET_CC_ARCH = "${SDK_CC_ARCH}"
19TARGET_LD_ARCH = "${SDK_LD_ARCH}"
20TARGET_AS_ARCH = "${SDK_AS_ARCH}"
21TARGET_FPU = ""
22
23target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
24target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
25target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
26target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
27target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
28baselib = "lib"
29
30do_populate_sysroot[stamp-extra-info] = ""
31do_packagedata[stamp-extra-info] = ""
32
33 # Need to force this to ensure consistency across architectures
34EXTRA_OECONF_GCC_FLOAT = ""
35
36USE_NLS = "no"
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
new file mode 100644
index 0000000000..c859703669
--- /dev/null
+++ b/meta/classes/debian.bbclass
@@ -0,0 +1,141 @@
1# Debian package renaming only occurs when a package is built
2# We therefore have to make sure we build all runtime packages
3 # before building the current package, to make sure the package's
4 # runtime dependencies are correct
5#
6 # Custom library package names can be defined by setting
7# DEBIANNAME_ + pkgname to the desired name.
8#
9 # Better expressed as: ensure all RDEPENDS packages are built before we package.
10 # This means we can't have circular RDEPENDS/RRECOMMENDS.
11
12AUTO_LIBNAME_PKGS = "${PACKAGES}"
13
14inherit package
15
16DEBIANRDEP = "do_packagedata"
17do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
18do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
19do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
20do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
21
22python () {
23 if not d.getVar("PACKAGES", True):
24 d.setVar("DEBIANRDEP", "")
25}
26
27python debian_package_name_hook () {
28 import glob, copy, stat, errno, re
29
30 pkgdest = d.getVar('PKGDEST', True)
31 packages = d.getVar('PACKAGES', True)
32 bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
33 lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
34 so_re = re.compile("lib.*\.so")
35
36 def socrunch(s):
37 s = s.lower().replace('_', '-')
38 m = re.match("^(.*)(.)\.so\.(.*)$", s)
39 if m is None:
40 return None
41 if m.group(2) in '0123456789':
42 bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
43 else:
44 bin = m.group(1) + m.group(2) + m.group(3)
45 dev = m.group(1) + m.group(2)
46 return (bin, dev)
47
48 def isexec(path):
49 try:
50 s = os.stat(path)
51 except (os.error, AttributeError):
52 return 0
53 return (s[stat.ST_MODE] & stat.S_IEXEC)
54
55 def add_rprovides(pkg, d):
56 newpkg = d.getVar('PKG_' + pkg)
57 if newpkg and newpkg != pkg:
58 provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split()
59 if pkg not in provs:
60 d.appendVar('RPROVIDES_' + pkg, " " + pkg)
61
62 def auto_libname(packages, orig_pkg):
63 sonames = []
64 has_bins = 0
65 has_libs = 0
66 for file in pkgfiles[orig_pkg]:
67 root = os.path.dirname(file)
68 if bin_re.match(root):
69 has_bins = 1
70 if lib_re.match(root):
71 has_libs = 1
72 if so_re.match(os.path.basename(file)):
73 cmd = (d.getVar('TARGET_PREFIX', True) or "") + "objdump -p " + file + " 2>/dev/null"
74 fd = os.popen(cmd)
75 lines = fd.readlines()
76 fd.close()
77 for l in lines:
78 m = re.match("\s+SONAME\s+([^\s]*)", l)
79 if m and not m.group(1) in sonames:
80 sonames.append(m.group(1))
81
82 bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
83 soname = None
84 if len(sonames) == 1:
85 soname = sonames[0]
86 elif len(sonames) > 1:
87 lead = d.getVar('LEAD_SONAME', True)
88 if lead:
89 r = re.compile(lead)
90 filtered = []
91 for s in sonames:
92 if r.match(s):
93 filtered.append(s)
94 if len(filtered) == 1:
95 soname = filtered[0]
96 elif len(filtered) > 1:
97 bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
98 else:
99 bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
100 else:
101 bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
102
103 if has_libs and not has_bins and soname:
104 soname_result = socrunch(soname)
105 if soname_result:
106 (pkgname, devname) = soname_result
107 for pkg in packages.split():
108                if (d.getVar('PKG_' + pkg, True) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, True)):
109 add_rprovides(pkg, d)
110 continue
111                debian_pn = d.getVar('DEBIANNAME_' + pkg, True)
112 if debian_pn:
113 newpkg = debian_pn
114 elif pkg == orig_pkg:
115 newpkg = pkgname
116 else:
117 newpkg = pkg.replace(orig_pkg, devname, 1)
118                mlpre = d.getVar('MLPREFIX', True)
119                if mlpre:
120                    if not newpkg.startswith(mlpre):
121                        newpkg = mlpre + newpkg
122 if newpkg != pkg:
123 d.setVar('PKG_' + pkg, newpkg)
124 add_rprovides(pkg, d)
125 else:
126 add_rprovides(orig_pkg, d)
127
128    # A reversed sort is needed when one package name is a substring of another,
129    # i.e. in ncurses, without the reverse sort we would get:
130 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
131 # and later
132 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
133 # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
134 for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
135 auto_libname(packages, pkg)
136}
137
138EXPORT_FUNCTIONS package_name_hook
139
140DEBIAN_NAMES = "1"
141
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
new file mode 100644
index 0000000000..78f5e4a7ba
--- /dev/null
+++ b/meta/classes/deploy.bbclass
@@ -0,0 +1,10 @@
1DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
2SSTATETASKS += "do_deploy"
3do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
4do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
5
6python do_deploy_setscene () {
7 sstate_setscene(d)
8}
9addtask do_deploy_setscene
10do_deploy[dirs] = "${DEPLOYDIR} ${B}"
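#
# A minimal sketch of a recipe using this class (hypothetical file names, for
# illustration only):
#   inherit deploy
#   addtask deploy after do_compile
#   do_deploy() {
#       install -m 0644 ${B}/firmware.bin ${DEPLOYDIR}/
#   }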
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
new file mode 100644
index 0000000000..41164a3f33
--- /dev/null
+++ b/meta/classes/devshell.bbclass
@@ -0,0 +1,154 @@
1inherit terminal
2
3DEVSHELL = "${SHELL}"
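# DEVSHELL defaults to the user's login shell; it can be overridden, e.g. from
# local.conf (a hypothetical override, for illustration):
#   DEVSHELL = "/bin/zsh"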
4
5python do_devshell () {
6 if d.getVarFlag("do_devshell", "manualfakeroot"):
7 d.prependVar("DEVSHELL", "pseudo ")
8 fakeenv = d.getVar("FAKEROOTENV", True).split()
9 for f in fakeenv:
10            k = f.split("=", 1)
11            d.setVar(k[0], k[1])
12 d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
13 d.delVarFlag("do_devshell", "fakeroot")
14
15 oe_terminal(d.getVar('DEVSHELL', True), 'OpenEmbedded Developer Shell', d)
16}
17
18addtask devshell after do_patch
19
20do_devshell[dirs] = "${S}"
21do_devshell[nostamp] = "1"
22
23# devshell and fakeroot/pseudo need careful handling since only the final
24# command should run under fakeroot emulation; any X connection should
25# be made as the normal user. We therefore carefully construct the
26# environment manually.
27python () {
28 if d.getVarFlag("do_devshell", "fakeroot"):
29 # We need to signal our code that we want fakeroot however we
30 # can't manipulate the environment and variables here yet (see YOCTO #4795)
31 d.setVarFlag("do_devshell", "manualfakeroot", "1")
32 d.delVarFlag("do_devshell", "fakeroot")
33}
34
35def devpyshell(d):
36
37 import code
38 import select
39 import signal
40 import termios
41
42 m, s = os.openpty()
43 sname = os.ttyname(s)
44
45 def noechoicanon(fd):
46 old = termios.tcgetattr(fd)
47 old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
48 # &~ termios.ISIG
49 termios.tcsetattr(fd, termios.TCSADRAIN, old)
50
51 # No echo or buffering over the pty
52 noechoicanon(s)
53
54 pid = os.fork()
55 if pid:
56 os.close(m)
57 oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
58 os._exit(0)
59 else:
60 os.close(s)
61
62 os.dup2(m, sys.stdin.fileno())
63 os.dup2(m, sys.stdout.fileno())
64 os.dup2(m, sys.stderr.fileno())
65
66 sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
67 sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
68
69 bb.utils.nonblockingfd(sys.stdout)
70 bb.utils.nonblockingfd(sys.stderr)
71 bb.utils.nonblockingfd(sys.stdin)
72
73 _context = {
74 "os": os,
75 "bb": bb,
76 "time": time,
77 "d": d,
78 }
79
80 ps1 = "pydevshell> "
81 ps2 = "... "
82 buf = []
83 more = False
84
85 i = code.InteractiveInterpreter(locals=_context)
86 print("OE PyShell (PN = %s)\n" % d.getVar("PN", True))
87
88 def prompt(more):
89 if more:
90 prompt = ps2
91 else:
92 prompt = ps1
93 sys.stdout.write(prompt)
94
95 # Restore Ctrl+C since bitbake masks this
96 def signal_handler(signal, frame):
97 raise KeyboardInterrupt
98 signal.signal(signal.SIGINT, signal_handler)
99
100 child = None
101
102 prompt(more)
103 while True:
104 try:
105 try:
106 (r, _, _) = select.select([sys.stdin], [], [], 1)
107 if not r:
108 continue
109 line = sys.stdin.readline().strip()
110 if not line:
111 prompt(more)
112 continue
113 except EOFError as e:
114 sys.stdout.write("\n")
115            except (OSError, IOError) as e:
116                if e.errno == 11:  # EAGAIN: nothing ready on the non-blocking fd yet
117                    continue
118                if e.errno == 5:   # EIO: the pty went away, the terminal was closed
119                    return
120                raise
121 else:
122 if not child:
123 child = int(line)
124 continue
125 buf.append(line)
126 source = "\n".join(buf)
127 more = i.runsource(source, "<pyshell>")
128 if not more:
129 buf = []
130 prompt(more)
131 except KeyboardInterrupt:
132 i.write("\nKeyboardInterrupt\n")
133 buf = []
134 more = False
135 prompt(more)
136 except SystemExit:
137 # Easiest way to ensure everything exits
138 os.kill(child, signal.SIGTERM)
139 break
140
141python do_devpyshell() {
142 import signal
143
144 try:
145 devpyshell(d)
146 except SystemExit:
147 # Stop the SIGTERM above causing an error exit code
148 return
149 finally:
150 return
151}
152addtask devpyshell after do_patch
153
154do_devpyshell[nostamp] = "1"
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
new file mode 100644
index 0000000000..1f1d6fba37
--- /dev/null
+++ b/meta/classes/distro_features_check.bbclass
@@ -0,0 +1,28 @@
1# Allow checking of required and conflicting DISTRO_FEATURES
2#
3# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
4# in DISTRO_FEATURES.
5# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
6# DISTRO_FEATURES.
7#
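# A typical recipe usage (hypothetical feature names, for illustration):
#   inherit distro_features_check
#   REQUIRED_DISTRO_FEATURES = "x11"
#   CONFLICT_DISTRO_FEATURES = "wayland"
#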
8# Copyright 2013 (C) O.S. Systems Software LTDA.
9
10python () {
11 required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES', True)
12 if required_distro_features:
13 required_distro_features = required_distro_features.split()
14 distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
15 for f in required_distro_features:
16            if f not in distro_features:
17                raise bb.parse.SkipPackage("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
20
21 conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES', True)
22 if conflict_distro_features:
23 conflict_distro_features = conflict_distro_features.split()
24 distro_features = (d.getVar('DISTRO_FEATURES', True) or "").split()
25 for f in conflict_distro_features:
26 if f in distro_features:
27 raise bb.parse.SkipPackage("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
28}
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
new file mode 100644
index 0000000000..a890de7911
--- /dev/null
+++ b/meta/classes/distrodata.bbclass
@@ -0,0 +1,902 @@
1include conf/distro/include/package_regex.inc
2addhandler distro_eventhandler
3distro_eventhandler[eventmask] = "bb.event.BuildStarted"
4python distro_eventhandler() {
5 import oe.distro_check as dc
6 logfile = dc.create_log_file(e.data, "distrodata.csv")
7 lf = bb.utils.lockfile("%s.lock" % logfile)
8 f = open(logfile, "a")
9    f.write("Package,Description,Owner,License,VerMatch,Version,Upstream,Reason,Recipe Status,Distro 1,Distro 2,Distro 3\n")
10 f.close()
11 bb.utils.unlockfile(lf)
12
13 return
14}
15
16addtask distrodata_np
17do_distrodata_np[nostamp] = "1"
18python do_distrodata_np() {
19 localdata = bb.data.createCopy(d)
20 pn = d.getVar("PN", True)
21 bb.note("Package Name: %s" % pn)
22
23 import oe.distro_check as dist_check
24 tmpdir = d.getVar('TMPDIR', True)
25 distro_check_dir = os.path.join(tmpdir, "distro_check")
26 datetime = localdata.getVar('DATETIME', True)
27 dist_check.update_distro_data(distro_check_dir, datetime)
28
29 if pn.find("-native") != -1:
30 pnstripped = pn.split("-native")
31 bb.note("Native Split: %s" % pnstripped)
32 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
33 bb.data.update_data(localdata)
34
35 if pn.find("-cross") != -1:
36 pnstripped = pn.split("-cross")
37 bb.note("cross Split: %s" % pnstripped)
38 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
39 bb.data.update_data(localdata)
40
41 if pn.find("-crosssdk") != -1:
42 pnstripped = pn.split("-crosssdk")
43 bb.note("cross Split: %s" % pnstripped)
44 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
45 bb.data.update_data(localdata)
46
47 if pn.startswith("nativesdk-"):
48 pnstripped = pn.replace("nativesdk-", "")
49 bb.note("NativeSDK Split: %s" % pnstripped)
50 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
51 bb.data.update_data(localdata)
52
53
54 if pn.find("-initial") != -1:
55 pnstripped = pn.split("-initial")
56 bb.note("initial Split: %s" % pnstripped)
57 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
58 bb.data.update_data(localdata)
59
60 """generate package information from .bb file"""
61 pname = localdata.getVar('PN', True)
62 pcurver = localdata.getVar('PV', True)
63 pdesc = localdata.getVar('DESCRIPTION', True)
64 if pdesc is not None:
65 pdesc = pdesc.replace(',','')
66 pdesc = pdesc.replace('\n','')
67
68 pgrp = localdata.getVar('SECTION', True)
69 plicense = localdata.getVar('LICENSE', True).replace(',','_')
70
71 rstatus = localdata.getVar('RECIPE_COLOR', True)
72 if rstatus is not None:
73 rstatus = rstatus.replace(',','')
74
75 pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
76 if pcurver == pupver:
77 vermatch="1"
78 else:
79 vermatch="0"
80 noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
81 if noupdate_reason is None:
82 noupdate="0"
83 else:
84 noupdate="1"
85 noupdate_reason = noupdate_reason.replace(',','')
86
87 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
88 rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
89 result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
90
91 bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
92 (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
93 line = pn
94 for i in result:
95 line = line + "," + i
96 bb.note("%s\n" % line)
97}
98
99addtask distrodata
100do_distrodata[nostamp] = "1"
101python do_distrodata() {
102 logpath = d.getVar('LOG_DIR', True)
103 bb.utils.mkdirhier(logpath)
104 logfile = os.path.join(logpath, "distrodata.csv")
105
106 import oe.distro_check as dist_check
107 localdata = bb.data.createCopy(d)
108 tmpdir = d.getVar('TMPDIR', True)
109 distro_check_dir = os.path.join(tmpdir, "distro_check")
110 datetime = localdata.getVar('DATETIME', True)
111 dist_check.update_distro_data(distro_check_dir, datetime)
112
113 pn = d.getVar("PN", True)
114 bb.note("Package Name: %s" % pn)
115
116 if pn.find("-native") != -1:
117 pnstripped = pn.split("-native")
118 bb.note("Native Split: %s" % pnstripped)
119 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
120 bb.data.update_data(localdata)
121
122 if pn.startswith("nativesdk-"):
123 pnstripped = pn.replace("nativesdk-", "")
124 bb.note("NativeSDK Split: %s" % pnstripped)
125 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
126 bb.data.update_data(localdata)
127
128 if pn.find("-cross") != -1:
129 pnstripped = pn.split("-cross")
130 bb.note("cross Split: %s" % pnstripped)
131 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
132 bb.data.update_data(localdata)
133
134 if pn.find("-crosssdk") != -1:
135 pnstripped = pn.split("-crosssdk")
136 bb.note("cross Split: %s" % pnstripped)
137 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
138 bb.data.update_data(localdata)
139
140 if pn.find("-initial") != -1:
141 pnstripped = pn.split("-initial")
142 bb.note("initial Split: %s" % pnstripped)
143 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
144 bb.data.update_data(localdata)
145
146 """generate package information from .bb file"""
147 pname = localdata.getVar('PN', True)
148 pcurver = localdata.getVar('PV', True)
149 pdesc = localdata.getVar('DESCRIPTION', True)
150 if pdesc is not None:
151 pdesc = pdesc.replace(',','')
152 pdesc = pdesc.replace('\n','')
153
154 pgrp = localdata.getVar('SECTION', True)
155 plicense = localdata.getVar('LICENSE', True).replace(',','_')
156
157 rstatus = localdata.getVar('RECIPE_COLOR', True)
158 if rstatus is not None:
159 rstatus = rstatus.replace(',','')
160
161 pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION', True)
162 if pcurver == pupver:
163 vermatch="1"
164 else:
165 vermatch="0"
166
167 noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
168 if noupdate_reason is None:
169 noupdate="0"
170 else:
171 noupdate="1"
172 noupdate_reason = noupdate_reason.replace(',','')
173
174 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
175 rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE', True)
176 # do the comparison
177 result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
178
179 lf = bb.utils.lockfile("%s.lock" % logfile)
180 f = open(logfile, "a")
181 f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s" % \
182 (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
183 line = ""
184 for i in result:
185 line = line + "," + i
186 f.write(line + "\n")
187 f.close()
188 bb.utils.unlockfile(lf)
189}
190
191addtask distrodataall after do_distrodata
192do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
193do_distrodataall[recideptask] = "do_${BB_DEFAULT_TASK}"
194do_distrodataall[nostamp] = "1"
195do_distrodataall() {
196 :
197}
198
199addhandler checkpkg_eventhandler
200checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
201python checkpkg_eventhandler() {
202 def parse_csv_file(filename):
203 package_dict = {}
204 fd = open(filename, "r")
205 lines = fd.read().rsplit("\n")
206 fd.close()
207
208 first_line = ''
209 index = 0
210 for line in lines:
211 #Skip the first line
212 if index == 0:
213 first_line = line
214 index += 1
215 continue
216 elif line == '':
217 continue
218 index += 1
219 package_name = line.rsplit("\t")[0]
220 if '-native' in package_name or 'nativesdk-' in package_name:
221 original_name = package_name.rsplit('-native')[0]
222 if original_name == '':
223 original_name = package_name.rsplit('nativesdk-')[0]
224 if original_name in package_dict:
225 continue
226 else:
227 package_dict[package_name] = line
228 else:
229 new_name = package_name + "-native"
230                if new_name not in package_dict:
231 new_name = 'nativesdk-' + package_name
232 if new_name in package_dict:
233 del package_dict[new_name]
234 package_dict[package_name] = line
235
236 fd = open(filename, "w")
237 fd.write("%s\n"%first_line)
238 for el in package_dict:
239 fd.write(package_dict[el] + "\n")
240 fd.close()
241
242 del package_dict
243
244 if bb.event.getName(e) == "BuildStarted":
245 import oe.distro_check as dc
246 logfile = dc.create_log_file(e.data, "checkpkg.csv")
247
248 lf = bb.utils.lockfile("%s.lock" % logfile)
249 f = open(logfile, "a")
250 f.write("Package\tVersion\tUpver\tLicense\tSection\tHome\tRelease\tDepends\tBugTracker\tPE\tDescription\tStatus\tTracking\tURI\tMAINTAINER\tNoUpReason\n")
251 f.close()
252 bb.utils.unlockfile(lf)
253 elif bb.event.getName(e) == "BuildCompleted":
254 import os
255 filename = "tmp/log/checkpkg.csv"
256 if os.path.isfile(filename):
257 lf = bb.utils.lockfile("%s.lock"%filename)
258 parse_csv_file(filename)
259 bb.utils.unlockfile(lf)
260 return
261}
262
263addtask checkpkg
264do_checkpkg[nostamp] = "1"
265python do_checkpkg() {
266 localdata = bb.data.createCopy(d)
267 import re
268 import tempfile
269 import subprocess
270
271 """
272 sanity check to ensure same name and type. Match as many patterns as possible
273 such as:
274 gnome-common-2.20.0.tar.gz (most common format)
275 gtk+-2.90.1.tar.gz
276 xf86-input-synaptics-12.6.9.tar.gz
277 dri2proto-2.3.tar.gz
278 blktool_4.orig.tar.gz
279 libid3tag-0.15.1b.tar.gz
280 unzip552.tar.gz
281 icu4c-3_6-src.tgz
282 genext2fs_1.3.orig.tar.gz
283 gst-fluendo-mp3
284 """
285    prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*\+?[\-_]" # match most patterns, which use "-" as the separator before the version digits
286 prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
287 prefix3 = "[0-9]+[\-]?[a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
288 prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
289 ver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"#"((\d+[\.\-_[a-z]])+)"
290    # The src.rpm extension was added only for rpm packages. It can be removed if rpm
291    # packages will always be considered as having to be manually upgraded
292 suffix = "(tar\.gz|tgz|tar\.bz2|tar\.lz4|zip|xz|rpm|bz2|lz4|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
293
294 suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "tar.lz4", "bz2", "lz4", "orig.tar.gz", "src.tar.gz", "src.rpm", "src.tgz", "svnr\d+.tar.bz2", "stable.tar.gz", "src.rpm")
295 sinterstr = "(?P<name>%s?)v?(?P<ver>%s)(\-source)?" % (prefix, ver_regex)
296 sdirstr = "(?P<name>%s)\.?v?(?P<ver>%s)(\-source)?[\.\-](?P<type>%s$)" % (prefix, ver_regex, suffix)
297
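    # Sketches of what these helpers extract (derived from the regexes above):
    #   parse_inter("2.4") -> ("", "2.4", "")
    #   parse_dir("gnome-common-2.20.0.tar.gz") -> ("gnome-common-", "2.20.0", "tar.gz")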
298 def parse_inter(s):
299 m = re.search(sinterstr, s)
300 if not m:
301 return None
302 else:
303 return (m.group('name'), m.group('ver'), "")
304
305 def parse_dir(s):
306 m = re.search(sdirstr, s)
307 if not m:
308 return None
309 else:
310 return (m.group('name'), m.group('ver'), m.group('type'))
311
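    # modelate_version() normalises a version string for bb.utils.vercmp, e.g.:
    #   modelate_version("v1_2")   -> "1.2"
    #   modelate_version("1.0rc2") -> "1.0.-1.2"  (pre-release markers become negative components)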
312 def modelate_version(version):
313 if version[0] in ['.', '-']:
314 if version[1].isdigit():
315 version = version[1] + version[0] + version[2:len(version)]
316 else:
317 version = version[1:len(version)]
318
319 version = re.sub('\-', '.', version)
320 version = re.sub('_', '.', version)
321 version = re.sub('(rc)+', '.-1.', version)
322 version = re.sub('(alpha)+', '.-3.', version)
323 version = re.sub('(beta)+', '.-2.', version)
324 if version[0] == 'v':
325 version = version[1:len(version)]
326 return version
327
328    """
329    Check whether 'new' is a newer version than 'old'. We use the existing vercmp() for
330    this. PE is pinned in the comparison as it's not relevant here, and PR is cleared too
331    for simplicity, as it's somewhat difficult to derive from the various upstream formats
332    """
333 def __vercmp(old, new):
334 (on, ov, ot) = old
335 (en, ev, et) = new
336 if on != en or (et and et not in suffixtuple):
337 return False
338 ov = modelate_version(ov)
339 ev = modelate_version(ev)
340
341 result = bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
342 if result < 0:
343 return True
344 else:
345 return False
346
347 """
348 wrapper for fetch upstream directory info
349 'url' - upstream link customized by regular expression
350 'd' - database
351 'tmpf' - tmpfile for fetcher output
352    We don't want to abort the whole build due to one recipe's error, so handle all
353    exceptions gracefully without letting them leak out.
354 """
355 def internal_fetch_wget(url, ud, d, tmpf):
356 status = "ErrFetchUnknown"
357
358 agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
359 fetchcmd = "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"%s\" '%s'" % (tmpf.name, agent, url)
360 try:
361 fetcher = bb.fetch2.wget.Wget(d)
362 fetcher._runwget(ud, d, fetchcmd, True)
363 status = "SUCC"
364        except bb.fetch2.BBFetchException as e:
365 status = "ErrFetch"
366
367 return status
368
369 """
370 Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
371 'url' - upstream link customized by regular expression
372 'd' - database
373 'curver' - current version
374 Return new version if success, or else error in "Errxxxx" style
375 """
376 def check_new_dir(url, curver, ud, d):
377 pn = d.getVar('PN', True)
378 f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
379 status = internal_fetch_wget(url, ud, d, f)
380 fhtml = f.read()
381 if status == "SUCC" and len(fhtml):
382 newver = parse_inter(curver)
383
384 """
385 match "*4.1/">*4.1/ where '*' matches chars
386 N.B. add package name, only match for digits
387 """
388 regex = d.getVar('REGEX', True)
389        if not regex:
390            regex = "^%s" % prefix
391 m = re.search("^%s" % regex, curver)
392 if m:
393 s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
394 else:
395 s = "(\d+[\.\-_])+\d+/?"
396
397 searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
398
399 reg = re.compile(searchstr)
400 valid = 0
401 for line in fhtml.split("\n"):
402 if line.find(curver) >= 0:
403 valid = 1
404 m = reg.search(line)
405 if m:
406 ver = m.group().split("\"")[1]
407 ver = ver.strip("/")
408 ver = parse_inter(ver)
409 if ver and __vercmp(newver, ver) == True:
410 newver = ver
411
412 """Expect a match for curver in directory list, or else it indicates unknown format"""
413 if not valid:
414 status = "ErrParseInterDir"
415 else:
416 """rejoin the path name"""
417 status = newver[0] + newver[1]
418 elif not len(fhtml):
419 status = "ErrHostNoDir"
420
421 f.close()
422 if status != "ErrHostNoDir" and re.match("Err", status):
423 logpath = d.getVar('LOG_DIR', True)
424 subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
425 os.unlink(f.name)
426 return status
427
428 """
429 Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
430 'url' - upstream link customized by regular expression
431 'd' - database
432 'curname' - current package name
433 Return new version if success, or else error in "Errxxxx" style
434 """
435 def check_new_version(url, curname, ud, d):
436 """possible to have no version in pkg name, such as spectrum-fw"""
437 if not re.search("\d+", curname):
438 return pcurver
439 pn = d.getVar('PN', True)
440 newver_regex = d.getVar('REGEX', True)
441 f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
442 status = internal_fetch_wget(url, ud, d, f)
443 fhtml = f.read()
444
445 if status == "SUCC" and len(fhtml):
446 newver = parse_dir(curname)
447
448 if not newver_regex:
449 """this is the default matching pattern, if recipe does not """
450 """provide a regex expression """
451 """match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
452 pn1 = re.search("^%s" % prefix, curname).group()
453 s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
454 searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
455 reg = searchstr
456 else:
457 reg = newver_regex
458 valid = 0
459 count = 0
460 for line in fhtml.split("\n"):
461 if pn == 'kconfig-frontends':
462 m = re.findall(reg, line)
463 if m:
464 valid = 1
465 for match in m:
466 (on, ov, oe) = newver
467 ver = (on, match[0], oe)
468 if ver and __vercmp(newver, ver) == True:
469 newver = ver
470 continue
471 count += 1
472 m = re.search(reg, line)
473 if m:
474 valid = 1
475 if not newver_regex:
476 ver = m.group().split("\"")[1].split("/")[-1]
477 if ver == "download":
478 ver = m.group().split("\"")[1].split("/")[-2]
479 ver = parse_dir(ver)
480 else:
481 """ we cheat a little here, but we assume that the
482                            regular expression in the recipe will extract exactly
483 the version """
484 (on, ov, oe) = newver
485 ver = (on, m.group('pver'), oe)
486 if ver and __vercmp(newver, ver) == True:
487 newver = ver
488 """Expect a match for curver in directory list, or else it indicates unknown format"""
489 if not valid:
490 status = "ErrParseDir"
491 else:
492 """newver still contains a full package name string"""
493 status = re.sub('_', '.', newver[1])
494 elif not len(fhtml):
495 status = "ErrHostNoDir"
496
497 f.close()
498 """if host hasn't directory information, no need to save tmp file"""
499 if status != "ErrHostNoDir" and re.match("Err", status):
500 logpath = d.getVar('LOG_DIR', True)
501 subprocess.call("cp %s %s/" % (f.name, logpath), shell=True)
502 os.unlink(f.name)
503 return status
504
505 """first check whether a uri is provided"""
506 src_uri = d.getVar('SRC_URI', True)
507 if not src_uri:
508 return
509
510 """initialize log files."""
511 logpath = d.getVar('LOG_DIR', True)
512 bb.utils.mkdirhier(logpath)
513 logfile = os.path.join(logpath, "checkpkg.csv")
514
515 """generate package information from .bb file"""
516 pname = d.getVar('PN', True)
517
518 if pname.find("-native") != -1:
519 if d.getVar('BBCLASSEXTEND', True):
520 return
521 pnstripped = pname.split("-native")
522 bb.note("Native Split: %s" % pnstripped)
523 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
524 bb.data.update_data(localdata)
525
526 if pname.startswith("nativesdk-"):
527 if d.getVar('BBCLASSEXTEND', True):
528 return
529 pnstripped = pname.replace("nativesdk-", "")
530 bb.note("NativeSDK Split: %s" % pnstripped)
531 localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES', True))
532 bb.data.update_data(localdata)
533
534 if pname.find("-cross") != -1:
535 pnstripped = pname.split("-cross")
536 bb.note("cross Split: %s" % pnstripped)
537 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
538 bb.data.update_data(localdata)
539
540 if pname.find("-initial") != -1:
541 pnstripped = pname.split("-initial")
542 bb.note("initial Split: %s" % pnstripped)
543 localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
544 bb.data.update_data(localdata)
545
546 chk_uri = d.getVar('REGEX_URI', True)
547 if not chk_uri:
548 chk_uri = src_uri
549 pdesc = localdata.getVar('DESCRIPTION', True)
550 pgrp = localdata.getVar('SECTION', True)
551 if localdata.getVar('PRSPV', True):
552 pversion = localdata.getVar('PRSPV', True)
553 else:
554 pversion = localdata.getVar('PV', True)
555 plicense = localdata.getVar('LICENSE', True)
556 psection = localdata.getVar('SECTION', True)
557 phome = localdata.getVar('HOMEPAGE', True)
558 prelease = localdata.getVar('PR', True)
559 pdepends = localdata.getVar('DEPENDS', True)
560 pbugtracker = localdata.getVar('BUGTRACKER', True)
561 ppe = localdata.getVar('PE', True)
562 psrcuri = localdata.getVar('SRC_URI', True)
563 maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
564
565 found = 0
566 for uri in src_uri.split():
567 m = re.compile('(?P<type>[^:]*)').match(uri)
568 if not m:
569            raise bb.fetch2.MalformedUrl(uri)
570 elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
571 found = 1
572 pproto = m.group('type')
573 break
574 if not found:
575 pproto = "file"
576 pupver = "N/A"
577 pstatus = "ErrUnknown"
578
579 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(uri)
580 if type in ['http', 'https', 'ftp']:
581 if d.getVar('PRSPV', True):
582 pcurver = d.getVar('PRSPV', True)
583 else:
584 pcurver = d.getVar('PV', True)
585 else:
586 if d.getVar('PRSPV', True):
587 pcurver = d.getVar('PRSPV', True)
588 else:
589 pcurver = d.getVar("SRCREV", True)
590
591
592 if type in ['http', 'https', 'ftp']:
593 ud = bb.fetch2.FetchData(uri, d)
594 newver = pcurver
595 altpath = path
596 dirver = "-"
597 curname = "-"
598
599 """
600 match version number amid the path, such as "5.7" in:
601 http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
602            N.B. what about something like "../5.7/5.8/..."? No such example found so far :-P
603 """
604 m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
605 if m:
606 altpath = path.split(m.group())[0]
607 dirver = m.group().strip("/")
608
609            """use the new path and strip parameters; for wget the only parameter is the md5sum"""
610 alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
611 my_uri = d.getVar('REGEX_URI', True)
612 if my_uri:
613 if d.getVar('PRSPV', True):
614 newver = d.getVar('PRSPV', True)
615 else:
616 newver = d.getVar('PV', True)
617 else:
618 newver = check_new_dir(alturi, dirver, ud, d)
619 altpath = path
620 if not re.match("Err", newver) and dirver != newver:
621                altpath = altpath.replace(dirver, newver, 1)
622 # For folder in folder cases - try to enter the folder again and then try parsing
623 """Now try to acquire all remote files in current directory"""
624 if not re.match("Err", newver):
625 curname = altpath.split("/")[-1]
626
627                """get remote name by skipping the package name"""
628 m = re.search(r"/.*/", altpath)
629 if not m:
630 altpath = "/"
631 else:
632 altpath = m.group()
633
634 chk_uri = d.getVar('REGEX_URI', True)
635 if not chk_uri:
636 alturi = bb.fetch.encodeurl([type, host, altpath, user, pswd, {}])
637 else:
638 alturi = chk_uri
639 newver = check_new_version(alturi, curname, ud, d)
640 while(newver == "ErrHostNoDir"):
641 if alturi == "/download":
642 break
643 else:
644 alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
645 newver = check_new_version(alturi, curname, ud, d)
646 if not re.match("Err", newver):
647 pupver = newver
648 if pupver != pcurver:
649 pstatus = "UPDATE"
650 else:
651 pstatus = "MATCH"
652
653 if re.match("Err", newver):
654 pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
655 elif type == 'git':
656 if user:
657 gituser = user + '@'
658 else:
659 gituser = ""
660
661 if 'protocol' in parm:
662 gitproto = parm['protocol']
663 else:
664 gitproto = "git"
665
666 # Get all tags and HEAD
667 if d.getVar('GIT_REGEX', True):
668 gitcmd = "git ls-remote %s://%s%s%s %s 2>&1" % (gitproto, gituser, host, path, d.getVar('GIT_REGEX', True))
669 else:
670 gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
671 gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
672
673 tmp = os.popen(gitcmd).read()
674 if 'unable to connect' in tmp:
675 tmp = None
676 tmp2 = os.popen(gitcmd2).read()
677 if 'unable to connect' in tmp2:
678 tmp2 = None
679            # This is for those repos that have tags like: refs/tags/1.2.2
680 phash = pversion.rsplit("+")[-1]
681 if tmp:
682 tmpline = tmp.split("\n")
683 verflag = 0
684 pupver = pversion
685 for line in tmpline:
686                    if len(line) == 0:
687                        break
688 puptag = line.split("/")[-1]
689 upstr_regex = d.getVar('REGEX', True)
690 if upstr_regex:
691 puptag = re.search(upstr_regex, puptag)
692 else:
693 puptag = re.search("(?P<pver>([0-9][\.|_]?)+)", puptag)
694                    if puptag is None:
695 continue
696 puptag = puptag.group('pver')
697 puptag = re.sub("_",".",puptag)
698 plocaltag = pupver.split("+git")[0]
699 if "git" in plocaltag:
700 plocaltag = plocaltag.split("-")[0]
701 result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
702
703 if result > 0:
704 verflag = 1
705 pupver = puptag
706                elif verflag == 0:
707 pupver = plocaltag
708            # This is for repos with no tags
709 elif tmp2:
710 pupver = pversion.rsplit("+")[0]
711 phash = pupver
712 else:
713 pstatus = "ErrGitAccess"
714 if not ('ErrGitAccess' in pstatus):
715
716 latest_head = tmp2.rsplit("\t")[0][:7]
717 tmp3 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pversion)
718 tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)(?P<git_prefix>(\+git[r|\-|]?)AUTOINC\+)(?P<head_md5>([\w|_]+))', pupver)
719 if not tmp4:
720 tmp4 = re.search('(?P<git_ver>(\d+[\.-]?)+)', pupver)
721
722                if tmp3 and tmp4:
723                    # Get status of the package - MATCH/UPDATE
724                    result = bb.utils.vercmp(("0", tmp4.group('git_ver'), ""), ("0", tmp3.group('git_ver'), ""))
725 # Get the latest tag
726 pstatus = 'MATCH'
727 if result < 0:
728 latest_pv = tmp3.group('git_ver')
729 else:
730 latest_pv = pupver
731                    if tmp3.group('head_md5')[:7] != latest_head:
732 pstatus = 'UPDATE'
733
734 git_prefix = tmp3.group('git_prefix')
735 pupver = latest_pv + tmp3.group('git_prefix') + latest_head
736 else:
737 if not tmp3:
738                    bb.plain("#DEBUG# Package %s: current version (%s) doesn't match the usual pattern" % (pname, pversion))
739 elif type == 'svn':
740 ud = bb.fetch2.FetchData(uri, d)
741
742 svnFetcher = bb.fetch2.svn.Svn(d)
743 svnFetcher.urldata_init(ud, d)
744 try:
745 pupver = svnFetcher.latest_revision(ud, d, ud.names[0])
746 except bb.fetch2.FetchError:
747 pstatus = "ErrSvnAccess"
748
749 if pupver:
750 if pupver in pversion:
751 pstatus = "MATCH"
752 else:
753 pstatus = "UPDATE"
754 else:
755 pstatus = "ErrSvnAccess"
756
757 if 'rev' in ud.parm:
758 pcurver = ud.parm['rev']
759
760 if pstatus != "ErrSvnAccess":
761 tag = pversion.rsplit("+svn")[0]
762 svn_prefix = re.search('(\+svn[r|\-]?)', pversion)
763 if tag and svn_prefix:
764 pupver = tag + svn_prefix.group() + pupver
765
766 elif type == 'cvs':
767 pupver = "HEAD"
768 pstatus = "UPDATE"
769 elif type == 'file':
770 """local file is always up-to-date"""
771 pupver = pcurver
772 pstatus = "MATCH"
773 else:
774 pstatus = "ErrUnsupportedProto"
775
776 if re.match("Err", pstatus):
777 pstatus += ":%s%s" % (host, path)
778
779 """Read from manual distro tracking fields as alternative"""
780 pmver = d.getVar("RECIPE_UPSTREAM_VERSION", True)
781 if not pmver:
782 pmver = "N/A"
783 pmstatus = "ErrNoRecipeData"
784 else:
785 if pmver == pcurver:
786 pmstatus = "MATCH"
787 else:
788 pmstatus = "UPDATE"
789
790 psrcuri = psrcuri.split()[0]
791 pdepends = "".join(pdepends.split("\t"))
792 pdesc = "".join(pdesc.split("\t"))
793 no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON', True)
794 lf = bb.utils.lockfile("%s.lock" % logfile)
795 f = open(logfile, "a")
796 f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \
797 (pname,pversion,pupver,plicense,psection, phome,prelease, pdepends,pbugtracker,ppe,pdesc,pstatus,pmver,psrcuri,maintainer, no_upgr_reason))
798 f.close()
799 bb.utils.unlockfile(lf)
800}
801
802addtask checkpkgall after do_checkpkg
803do_checkpkgall[recrdeptask] = "do_checkpkgall do_checkpkg"
804do_checkpkgall[recideptask] = "do_${BB_DEFAULT_TASK}"
805do_checkpkgall[nostamp] = "1"
806do_checkpkgall() {
807 :
808}
809
810addhandler distro_check_eventhandler
811distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
812python distro_check_eventhandler() {
813 """initialize log files."""
814 import oe.distro_check as dc
815 result_file = dc.create_log_file(e.data, "distrocheck.csv")
816 return
817}
818
819addtask distro_check
820do_distro_check[nostamp] = "1"
821python do_distro_check() {
822 """checks if the package is present in other public Linux distros"""
823 import oe.distro_check as dc
824 import shutil
825 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
826 return
827
828 localdata = bb.data.createCopy(d)
829 bb.data.update_data(localdata)
830 tmpdir = d.getVar('TMPDIR', True)
831 distro_check_dir = os.path.join(tmpdir, "distro_check")
832 logpath = d.getVar('LOG_DIR', True)
833 bb.utils.mkdirhier(logpath)
834 result_file = os.path.join(logpath, "distrocheck.csv")
835 datetime = localdata.getVar('DATETIME', True)
836 dc.update_distro_data(distro_check_dir, datetime)
837
838 # do the comparison
839 result = dc.compare_in_distro_packages_list(distro_check_dir, d)
840
841 # save the results
842 dc.save_distro_check_result(result, datetime, result_file, d)
843}
844
845addtask distro_checkall after do_distro_check
846do_distro_checkall[recrdeptask] = "do_distro_checkall do_distro_check"
847do_distro_checkall[recideptask] = "do_${BB_DEFAULT_TASK}"
848do_distro_checkall[nostamp] = "1"
849do_distro_checkall() {
850 :
851}
852#
853# Check missing license text.
854# Use this task to generate the missing-license-text data for the pkg-report system,
855# so we can find recipes whose license text doesn't exist in the common-licenses directory.
856#
857addhandler checklicense_eventhandler
858checklicense_eventhandler[eventmask] = "bb.event.BuildStarted"
859python checklicense_eventhandler() {
860 """initialize log files."""
861 import oe.distro_check as dc
862 logfile = dc.create_log_file(e.data, "missinglicense.csv")
863 lf = bb.utils.lockfile("%s.lock" % logfile)
864 f = open(logfile, "a")
865 f.write("Package\tLicense\tMissingLicense\n")
866 f.close()
867 bb.utils.unlockfile(lf)
868 return
869}
870
871addtask checklicense
872do_checklicense[nostamp] = "1"
873python do_checklicense() {
874 import shutil
875 logpath = d.getVar('LOG_DIR', True)
876 bb.utils.mkdirhier(logpath)
877 pn = d.getVar('PN', True)
878 logfile = os.path.join(logpath, "missinglicense.csv")
879 generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
880 license_types = d.getVar('LICENSE', True)
881 for license_type in ((license_types.replace('+', '').replace('|', '&')
882 .replace('(', '').replace(')', '').replace(';', '')
883 .replace(',', '').replace(" ", "").split("&"))):
884 if not os.path.isfile(os.path.join(generic_directory, license_type)):
885 lf = bb.utils.lockfile("%s.lock" % logfile)
886 f = open(logfile, "a")
887 f.write("%s\t%s\t%s\n" % \
888 (pn,license_types,license_type))
889 f.close()
890 bb.utils.unlockfile(lf)
891 return
892}
893
894addtask checklicenseall after do_checklicense
895do_checklicenseall[recrdeptask] = "do_checklicenseall do_checklicense"
896do_checklicenseall[recideptask] = "do_${BB_DEFAULT_TASK}"
897do_checklicenseall[nostamp] = "1"
898do_checklicenseall() {
899 :
900}
901
902
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
new file mode 100644
index 0000000000..aa18e8b292
--- /dev/null
+++ b/meta/classes/distutils-base.bbclass
@@ -0,0 +1,4 @@
1DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
2RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
3
4inherit distutils-common-base pythonnative
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
new file mode 100644
index 0000000000..9a608eb63e
--- /dev/null
+++ b/meta/classes/distutils-common-base.bbclass
@@ -0,0 +1,24 @@
1inherit python-dir
2
3EXTRA_OEMAKE = ""
4
5export STAGING_INCDIR
6export STAGING_LIBDIR
7
8PACKAGES = "${PN}-staticdev ${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
9
10FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
11
12FILES_${PN}-staticdev += "\
13 ${PYTHON_SITEPACKAGES_DIR}/*.a \
14"
15FILES_${PN}-dev += "\
16 ${datadir}/pkgconfig \
17 ${libdir}/pkgconfig \
18 ${PYTHON_SITEPACKAGES_DIR}/*.la \
19"
20FILES_${PN}-dbg += "\
21 ${PYTHON_SITEPACKAGES_DIR}/.debug \
22 ${PYTHON_SITEPACKAGES_DIR}/*/.debug \
23 ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
24"
diff --git a/meta/classes/distutils-native-base.bbclass b/meta/classes/distutils-native-base.bbclass
new file mode 100644
index 0000000000..509cb9551a
--- /dev/null
+++ b/meta/classes/distutils-native-base.bbclass
@@ -0,0 +1,3 @@
1inherit distutils-common-base
2
3DEPENDS += "${@["${PYTHON_PN}-native", ""][(d.getVar('PACKAGES', True) == '')]}"
diff --git a/meta/classes/distutils-tools.bbclass b/meta/classes/distutils-tools.bbclass
new file mode 100644
index 0000000000..f43450e56f
--- /dev/null
+++ b/meta/classes/distutils-tools.bbclass
@@ -0,0 +1,77 @@
1DISTUTILS_BUILD_ARGS ?= ""
2DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
3DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
4 --install-data=${STAGING_DATADIR}"
5DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
6 --install-data=${D}/${datadir}"
7
8distutils_do_compile() {
9 STAGING_INCDIR=${STAGING_INCDIR} \
10 STAGING_LIBDIR=${STAGING_LIBDIR} \
11 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
12 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
13        bbfatal "${PYTHON_PN} setup.py build execution failed."
14}
15
16distutils_stage_headers() {
17 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
18 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
19 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
20 bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
21}
22
23distutils_stage_all() {
24 STAGING_INCDIR=${STAGING_INCDIR} \
25 STAGING_LIBDIR=${STAGING_LIBDIR} \
26 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
27 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
28 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
29 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
30 bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
31}
32
33distutils_do_install() {
34 echo "Beginning ${PN} Install ..."
35 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
36 echo "Step 2 of ${PN} Install ..."
37 STAGING_INCDIR=${STAGING_INCDIR} \
38 STAGING_LIBDIR=${STAGING_LIBDIR} \
39 PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
40 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
41 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
42 bbfatal "${PYTHON_PN} setup.py install execution failed."
43
44 echo "Step 3 of ${PN} Install ..."
45 # support filenames with *spaces*
46 find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
47 sed -i -e s:${D}::g $i
48 done
49
50 echo "Step 4 of ${PN} Install ..."
51 if test -e ${D}${bindir} ; then
52 for i in ${D}${bindir}/* ; do \
53 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
54 done
55 fi
56
57 echo "Step 4 of ${PN} Install ..."
58 if test -e ${D}${sbindir}; then
59 for i in ${D}${sbindir}/* ; do \
60 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
61 done
62 fi
63
64 echo "Step 5 of ${PN} Install ..."
65 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
66
67 #
68 # FIXME: Bandaid against wrong datadir computation
69 #
70 if test -e ${D}${datadir}/share; then
71 mv -f ${D}${datadir}/share/* ${D}${datadir}/
72 fi
73}
74
75#EXPORT_FUNCTIONS do_compile do_install
76
77export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
new file mode 100644
index 0000000000..6ed7ecc99f
--- /dev/null
+++ b/meta/classes/distutils.bbclass
@@ -0,0 +1,80 @@
1inherit distutils-base
2
3DISTUTILS_BUILD_ARGS ?= ""
4DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
5DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
6 --install-data=${STAGING_DATADIR}"
7DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
8 --install-data=${D}/${datadir}"
9
10distutils_do_compile() {
11 STAGING_INCDIR=${STAGING_INCDIR} \
12 STAGING_LIBDIR=${STAGING_LIBDIR} \
13 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
14 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
15        bbfatal "${PYTHON_PN} setup.py build execution failed."
16}
17
18distutils_stage_headers() {
19 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
20 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
21 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
22 bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
23}
24
25distutils_stage_all() {
26 STAGING_INCDIR=${STAGING_INCDIR} \
27 STAGING_LIBDIR=${STAGING_LIBDIR} \
28 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
29 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
30 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
31 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
32 bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
33}
34
35distutils_do_install() {
36 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
37 STAGING_INCDIR=${STAGING_INCDIR} \
38 STAGING_LIBDIR=${STAGING_LIBDIR} \
39 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
40 BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
41 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
42 bbfatal "${PYTHON_PN} setup.py install execution failed."
43
44 # support filenames with *spaces*
45 # only modify file if it contains path to avoid recompilation on the target
46 find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
47
48 if test -e ${D}${bindir} ; then
49 for i in ${D}${bindir}/* ; do \
50 if [ ${PN} != "${BPN}-native" ]; then
51 sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
52 fi
53 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
54 done
55 fi
56
57 if test -e ${D}${sbindir}; then
58 for i in ${D}${sbindir}/* ; do \
59 if [ ${PN} != "${BPN}-native" ]; then
60 sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${bindir}/env\ python:g $i
61 fi
62 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
63 done
64 fi
65
66 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
67 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py*
68
69 #
70 # FIXME: Bandaid against wrong datadir computation
71 #
72 if test -e ${D}${datadir}/share; then
73 mv -f ${D}${datadir}/share/* ${D}${datadir}/
74 rmdir ${D}${datadir}/share
75 fi
76}
77
78EXPORT_FUNCTIONS do_compile do_install
79
80export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
new file mode 100644
index 0000000000..d4d25dccb9
--- /dev/null
+++ b/meta/classes/distutils3-base.bbclass
@@ -0,0 +1,8 @@
1DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
2RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
3
4PYTHON_BASEVERSION = "3.3"
5PYTHON_ABI = "m"
6
7inherit distutils-common-base python3native
8
diff --git a/meta/classes/distutils3-native-base.bbclass b/meta/classes/distutils3-native-base.bbclass
new file mode 100644
index 0000000000..ed3fe54587
--- /dev/null
+++ b/meta/classes/distutils3-native-base.bbclass
@@ -0,0 +1,4 @@
1PYTHON_BASEVERSION = "3.3"
2PYTHON_ABI = "m"
3
4inherit distutils-native-base
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
new file mode 100644
index 0000000000..e909ef41b6
--- /dev/null
+++ b/meta/classes/distutils3.bbclass
@@ -0,0 +1,96 @@
1inherit distutils3-base
2
3DISTUTILS_BUILD_ARGS ?= ""
4DISTUTILS_BUILD_EXT_ARGS ?= ""
5DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
6DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
7 --install-data=${STAGING_DATADIR}"
8DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
9 --install-data=${D}/${datadir}"
10
11distutils3_do_compile() {
12 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
13 SYS=${MACHINE}
14 else
15 SYS=${HOST_SYS}
16 fi
17 STAGING_INCDIR=${STAGING_INCDIR} \
18 STAGING_LIBDIR=${STAGING_LIBDIR} \
19 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
20 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
21 build ${DISTUTILS_BUILD_ARGS} || \
22        bbfatal "${PYTHON_PN} setup.py build execution failed."
23}
24
25distutils3_stage_headers() {
26 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
27 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
28 SYS=${MACHINE}
29 else
30 SYS=${HOST_SYS}
31 fi
32 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
33 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
34 bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
35}
36
37distutils3_stage_all() {
38 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
39 SYS=${MACHINE}
40 else
41 SYS=${HOST_SYS}
42 fi
43 STAGING_INCDIR=${STAGING_INCDIR} \
44 STAGING_LIBDIR=${STAGING_LIBDIR} \
45 install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
46 PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
47 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
48 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
49 bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
50}
51
52distutils3_do_install() {
53 install -d ${D}${PYTHON_SITEPACKAGES_DIR}
54 if [ ${BUILD_SYS} != ${HOST_SYS} ]; then
55 SYS=${MACHINE}
56 else
57 SYS=${HOST_SYS}
58 fi
59 STAGING_INCDIR=${STAGING_INCDIR} \
60 STAGING_LIBDIR=${STAGING_LIBDIR} \
61 PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
62 BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
63 ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
64 bbfatal "${PYTHON_PN} setup.py install execution failed."
65
66 # support filenames with *spaces*
67 find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
68
69 if test -e ${D}${bindir} ; then
70 for i in ${D}${bindir}/* ; do \
71 sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
72 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
73 done
74 fi
75
76 if test -e ${D}${sbindir}; then
77 for i in ${D}${sbindir}/* ; do \
78            sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${bindir}/env\ ${PYTHON_PN}:g $i
79 sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
80 done
81 fi
82
83 rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
84
85 #
86 # FIXME: Bandaid against wrong datadir computation
87 #
88 if test -e ${D}${datadir}/share; then
89 mv -f ${D}${datadir}/share/* ${D}${datadir}/
90 rmdir ${D}${datadir}/share
91 fi
92}
93
94EXPORT_FUNCTIONS do_compile do_install
95
96export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
new file mode 100644
index 0000000000..2ac62747a2
--- /dev/null
+++ b/meta/classes/externalsrc.bbclass
@@ -0,0 +1,53 @@
1# Copyright (C) 2012 Linux Foundation
2# Author: Richard Purdie
3# Some code and influence taken from srctree.bbclass:
4# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
5# Released under the MIT license (see COPYING.MIT for the terms)
6#
7# externalsrc.bbclass enables use of an existing source tree, usually external to
8# the build system, to build a piece of software rather than using the usual
9# fetch/unpack/patch process.
10#
11# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
12# directory you want to use containing the sources e.g. from local.conf for a recipe
13# called "myrecipe" you would do:
14#
15# INHERIT += "externalsrc"
16# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
17#
18# In order to make this class work for both target and native versions (or with
19# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
20# directory under the work directory (split source and build directories). This is
21# the default, but the build directory can be set to the source directory if
22# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
23#
24# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
25#
26
27SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
28
29python () {
30 externalsrc = d.getVar('EXTERNALSRC', True)
31 if externalsrc:
32 d.setVar('S', externalsrc)
33 externalsrcbuild = d.getVar('EXTERNALSRC_BUILD', True)
34 if externalsrcbuild:
35 d.setVar('B', externalsrcbuild)
36 else:
37 d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
38 d.setVar('SRC_URI', '')
39
40 tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
41
42 for task in tasks:
43 if task.endswith("_setscene"):
44 # sstate is never going to work for external source trees, disable it
45 bb.build.deltask(task, d)
46 else:
47            # Since configure will likely touch ${S}, take a lock so only one task has access at a time
48 d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock")
49
50 for task in d.getVar("SRCTREECOVEREDTASKS", True).split():
51 bb.build.deltask(task, d)
52}
53
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
new file mode 100644
index 0000000000..faf57b108e
--- /dev/null
+++ b/meta/classes/extrausers.bbclass
@@ -0,0 +1,65 @@
1# This bbclass is mainly used for image level user/group configuration.
2# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
3
4# Below is an example showing how to use this functionality.
5# INHERIT += "extrausers"
6# EXTRA_USERS_PARAMS = "\
7# useradd -p '' tester; \
8# groupadd developers; \
9# userdel nobody; \
10# groupdel -g video; \
11# groupmod -g 1020 developers; \
12# usermod -s /bin/sh tester; \
13# "
14
15
16inherit useradd_base
17
18IMAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS', True))]}"
19
20# Image level user / group settings
21ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
22
23# Image level user / group settings
24set_user_group () {
25 user_group_settings="${EXTRA_USERS_PARAMS}"
26 export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo"
27 setting=`echo $user_group_settings | cut -d ';' -f1`
28 remaining=`echo $user_group_settings | cut -d ';' -f2-`
29 while test "x$setting" != "x"; do
30 cmd=`echo $setting | cut -d ' ' -f1`
31 opts=`echo $setting | cut -d ' ' -f2-`
32    # Unlike useradd.bbclass, there's no file locking issue here, as these
33    # settings are applied as a serial process, so we only try once.
34 case $cmd in
35 useradd)
36 perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
37 ;;
38 groupadd)
39 perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
40 ;;
41 userdel)
42 perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
43 ;;
44 groupdel)
45 perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
46 ;;
47 usermod)
48 perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
49 ;;
50 groupmod)
51 perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
52 ;;
53 *)
54 bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
55 ;;
56 esac
57 # Avoid infinite loop if the last parameter doesn't end with ';'
58 if [ "$setting" = "$remaining" ]; then
59 break
60 fi
61 # iterate to the next setting
62 setting=`echo $remaining | cut -d ';' -f1`
63 remaining=`echo $remaining | cut -d ';' -f2-`
64 done
65}
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
new file mode 100644
index 0000000000..d122387ffd
--- /dev/null
+++ b/meta/classes/fontcache.bbclass
@@ -0,0 +1,45 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for font
3# packages.
4#
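#
# A hypothetical font recipe might use it like this (assumed package names,
# for illustration only):
#   inherit fontcache
#   FONT_PACKAGES = "${PN}-sans ${PN}-serif"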
5
6DEPENDS += "qemu-native"
7inherit qemu
8
9FONT_PACKAGES ??= "${PN}"
10FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
11FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
12fontcache_common() {
13if [ "x$D" != "x" ] ; then
14 $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} bindir=${bindir} \
15 libdir=${libdir} base_libdir=${base_libdir} fontconfigcachedir=${FONTCONFIG_CACHE_DIR}
16else
17 fc-cache
18fi
19}
20
21python () {
22 font_pkgs = d.getVar('FONT_PACKAGES', True).split()
23 deps = d.getVar("FONT_EXTRA_RDEPENDS", True)
24
25 for pkg in font_pkgs:
26        if deps: d.appendVar('RDEPENDS_' + pkg, ' ' + deps)
27}
28
29python add_fontcache_postinsts() {
30 for pkg in d.getVar('FONT_PACKAGES', True).split():
31 bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
32 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
33 if not postinst:
34 postinst = '#!/bin/sh\n'
35 postinst += d.getVar('fontcache_common', True)
36 d.setVar('pkg_postinst_%s' % pkg, postinst)
37
38 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
39 if not postrm:
40 postrm = '#!/bin/sh\n'
41 postrm += d.getVar('fontcache_common', True)
42 d.setVar('pkg_postrm_%s' % pkg, postrm)
43}
44
45PACKAGEFUNCS =+ "add_fontcache_postinsts"
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
new file mode 100644
index 0000000000..e9076b2779
--- /dev/null
+++ b/meta/classes/gconf.bbclass
@@ -0,0 +1,70 @@
1DEPENDS += "gconf gconf-native"
2
3# These are for when gconftool is used natively and the prefix isn't necessarily
4# the sysroot. TODO: replicate the postinst logic for -native packages going
5# into sysroot as they won't be running their own install-time schema
6# registration (disabled below) nor the postinst script (as they don't happen).
7export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
8export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
9
10# Disable install-time schema registration as we're a packaging system so this
11# happens in the postinst script, not at install time. Set both the configure
12# script option and the traditional environment variable just to make sure.
13EXTRA_OECONF += "--disable-schemas-install"
14export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
15
16gconf_postinst() {
17if [ "x$D" != "x" ]; then
18 export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
19else
20 export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
21fi
22
23SCHEMA_LOCATION=$D/etc/gconf/schemas
24for SCHEMA in ${SCHEMA_FILES}; do
25 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
26 HOME=$D/root gconftool-2 \
27 --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
28 fi
29done
30}
31
32gconf_prerm() {
33SCHEMA_LOCATION=/etc/gconf/schemas
34for SCHEMA in ${SCHEMA_FILES}; do
35 if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
36 HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
37 gconftool-2 \
38 --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
39 fi
40done
41}
42
43python populate_packages_append () {
44 import re
45 packages = d.getVar('PACKAGES', True).split()
46 pkgdest = d.getVar('PKGDEST', True)
47
48 for pkg in packages:
49 schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
50 schemas = []
51        schema_re = re.compile(r".*\.schemas$")
52 if os.path.exists(schema_dir):
53 for f in os.listdir(schema_dir):
54 if schema_re.match(f):
55 schemas.append(f)
56 if schemas != []:
57 bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
58 d.setVar('SCHEMA_FILES', " ".join(schemas))
59 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
60 if not postinst:
61 postinst = '#!/bin/sh\n'
62 postinst += d.getVar('gconf_postinst', True)
63 d.setVar('pkg_postinst_%s' % pkg, postinst)
64 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
65 if not prerm:
66 prerm = '#!/bin/sh\n'
67 prerm += d.getVar('gconf_prerm', True)
68 d.setVar('pkg_prerm_%s' % pkg, prerm)
69 d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX') + 'gconf')
70}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
new file mode 100644
index 0000000000..03b89b2455
--- /dev/null
+++ b/meta/classes/gettext.bbclass
@@ -0,0 +1,19 @@
1def gettext_dependencies(d):
2 if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
3 return ""
4 if d.getVar('USE_NLS', True) == 'no':
5 return "gettext-minimal-native"
6 return d.getVar('DEPENDS_GETTEXT', False)
7
8def gettext_oeconf(d):
9 if d.getVar('USE_NLS', True) == 'no':
10 return '--disable-nls'
11    # also remove the NLS bits if INHIBIT_DEFAULT_DEPS is set (unless building cross-canadian)
12 if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
13 return '--disable-nls'
14 return "--enable-nls"
15
16DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
17
18BASEDEPENDS =+ "${@gettext_dependencies(d)}"
19EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
new file mode 100644
index 0000000000..0de22dd6d2
--- /dev/null
+++ b/meta/classes/gnome.bbclass
@@ -0,0 +1,5 @@
1inherit gnomebase gtk-icon-cache gconf mime
2
3EXTRA_OECONF += "--disable-introspection"
4
5UNKNOWN_CONFIGURE_WHITELIST += "--disable-introspection"
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
new file mode 100644
index 0000000000..6ca13cb1e0
--- /dev/null
+++ b/meta/classes/gnomebase.bbclass
@@ -0,0 +1,30 @@
1def gnome_verdir(v):
2 return oe.utils.trim_version(v, 2)
3
4GNOME_COMPRESS_TYPE ?= "bz2"
5SECTION ?= "x11/gnome"
6GNOMEBN ?= "${BPN}"
7SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
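# e.g. a recipe whose upstream ships .tar.xz tarballs (hypothetical case) would set:
#   GNOME_COMPRESS_TYPE = "xz"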
8
9DEPENDS += "gnome-common-native"
10
11FILES_${PN} += "${datadir}/application-registry \
12 ${datadir}/mime-info \
13 ${datadir}/mime/packages \
14 ${datadir}/mime/application \
15 ${datadir}/gnome-2.0 \
16 ${datadir}/polkit* \
17 ${datadir}/GConf \
18 ${datadir}/glib-2.0/schemas \
19"
20
21FILES_${PN}-doc += "${datadir}/devhelp"
22
23inherit autotools pkgconfig
24
25do_install_append() {
26 rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
27 rm -rf ${D}${localstatedir}/scrollkeeper/*
28 rm -f ${D}${datadir}/applications/*.cache
29}
30
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
new file mode 100644
index 0000000000..47bd35e049
--- /dev/null
+++ b/meta/classes/grub-efi.bbclass
@@ -0,0 +1,141 @@
1# grub-efi.bbclass
2# Copyright (c) 2011, Intel Corporation.
3# All rights reserved.
4#
5# Released under the MIT license (see packages/COPYING)
6
7# Provide grub-efi specific functions for building bootable images.
8
9# External variables
10# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
11# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
12# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
13# ${LABELS} - a list of targets for the automatic config
14# ${APPEND} - an override list of append strings for each label
15# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
16# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
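#
# For example, a machine or image configuration might set (hypothetical values):
#   LABELS = "boot install"
#   GRUB_GFXSERIAL = "1"
#   GRUB_TIMEOUT = "5"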
17
18do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
19do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy"
20
21GRUB_SERIAL ?= "console=ttyS0,115200"
22GRUBCFG = "${S}/grub.cfg"
23GRUB_TIMEOUT ?= "10"
24#FIXME: build this from the machine config
25GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
26
27EFIDIR = "/EFI/BOOT"
28
29efi_populate() {
30 # DEST must be the root of the image so that EFIDIR is not
31 # nested under a top level directory.
32 DEST=$1
33
34 install -d ${DEST}${EFIDIR}
35
36 GRUB_IMAGE="bootia32.efi"
37 if [ "${TARGET_ARCH}" = "x86_64" ]; then
38 GRUB_IMAGE="bootx64.efi"
39 fi
40 install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
41
42 install -m 0644 ${GRUBCFG} ${DEST}${EFIDIR}
43}
44
45efi_iso_populate() {
46 iso_dir=$1
47 efi_populate $iso_dir
48    # Build an EFI directory to create efi.img
49 mkdir -p ${EFIIMGDIR}/${EFIDIR}
50 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
51 cp $iso_dir/vmlinuz ${EFIIMGDIR}
52 echo "${GRUB_IMAGE}" > ${EFIIMGDIR}/startup.nsh
53 if [ -f "$iso_dir/initrd" ] ; then
54 cp $iso_dir/initrd ${EFIIMGDIR}
55 fi
56}
57
58efi_hddimg_populate() {
59 efi_populate $1
60}
61
62python build_efi_cfg() {
63 import sys
64
65 workdir = d.getVar('WORKDIR', True)
66 if not workdir:
67 bb.error("WORKDIR not defined, unable to package")
68 return
69
70 gfxserial = d.getVar('GRUB_GFXSERIAL', True) or ""
71
72 labels = d.getVar('LABELS', True)
73 if not labels:
74 bb.debug(1, "LABELS not defined, nothing to do")
75 return
76
77 if labels == []:
78 bb.debug(1, "No labels, nothing to do")
79 return
80
81 cfile = d.getVar('GRUBCFG', True)
82 if not cfile:
83 raise bb.build.FuncFailed('Unable to read GRUBCFG')
84
85 try:
86        cfgfile = open(cfile, 'w')
87    except OSError:
88        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
89
90 cfgfile.write('# Automatically created by OE\n')
91
92 opts = d.getVar('GRUB_OPTS', True)
93 if opts:
94 for opt in opts.split(';'):
95 cfgfile.write('%s\n' % opt)
96
97 cfgfile.write('default=%s\n' % (labels.split()[0]))
98
99 timeout = d.getVar('GRUB_TIMEOUT', True)
100 if timeout:
101 cfgfile.write('timeout=%s\n' % timeout)
102 else:
103 cfgfile.write('timeout=50\n')
104
105 if gfxserial == "1":
106 btypes = [ [ " graphics console", "" ],
107 [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
108 else:
109 btypes = [ [ "", "" ] ]
110
111 for label in labels.split():
112 localdata = d.createCopy()
113
114 overrides = localdata.getVar('OVERRIDES', True)
115 if not overrides:
116 raise bb.build.FuncFailed('OVERRIDES not defined')
117
118 for btype in btypes:
119 localdata.setVar('OVERRIDES', label + ':' + overrides)
120 bb.data.update_data(localdata)
121
122 cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
123 lb = label
124 if label == "install":
125 lb = "install-efi"
126 cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
127
128 append = localdata.getVar('APPEND', True)
129 initrd = localdata.getVar('INITRD', True)
130
131 if append:
132 cfgfile.write('%s' % (append))
133 cfgfile.write(' %s' % btype[1])
134 cfgfile.write('\n')
135
136 if initrd:
137 cfgfile.write('initrd /initrd')
138 cfgfile.write('\n}\n')
139
140 cfgfile.close()
141}
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
new file mode 100644
index 0000000000..dec5abc026
--- /dev/null
+++ b/meta/classes/gsettings.bbclass
@@ -0,0 +1,37 @@
1# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
2# form on package install and removal.
3#
4# The compiled schemas are platform-agnostic, so we can depend on
5# glib-2.0-native for the native tool and run the postinst script when the
6# rootfs builds to save a little time on first boot.
7
8# TODO use a trigger so that this runs once per package operation run
9
10DEPENDS += "glib-2.0-native"
11
12RDEPENDS_${PN} += "glib-2.0-utils"
13
14FILES_${PN} += "${datadir}/glib-2.0/schemas"
15
16gsettings_postinstrm () {
17 glib-compile-schemas $D${datadir}/glib-2.0/schemas
18}
19
20python populate_packages_append () {
21 pkg = d.getVar('PN', True)
22 bb.note("adding gsettings postinst scripts to %s" % pkg)
23
24 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
25 if not postinst:
26 postinst = '#!/bin/sh\n'
27 postinst += d.getVar('gsettings_postinstrm', True)
28 d.setVar('pkg_postinst_%s' % pkg, postinst)
29
30 bb.note("adding gsettings postrm scripts to %s" % pkg)
31
32 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
33 if not postrm:
34 postrm = '#!/bin/sh\n'
35 postrm += d.getVar('gsettings_postinstrm', True)
36 d.setVar('pkg_postrm_%s' % pkg, postrm)
37}
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
new file mode 100644
index 0000000000..e32f98dcfc
--- /dev/null
+++ b/meta/classes/gtk-doc.bbclass
@@ -0,0 +1,25 @@
1# Helper class to pull in the right gtk-doc dependencies and disable
2# gtk-doc.
3#
4# Long-term it would be great if this class could be toggled between
5# gtk-doc-stub-native and the real gtk-doc-native, which would enable
6# re-generation of documentation. For now, we'll make do with this which
7# packages up any existing documentation (e.g. from tarball builds).
8
9# The documentation directory, where the infrastructure will be copied.
10# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
11GTKDOC_DOCDIR ?= "${S}"
12
13DEPENDS_append = " gtk-doc-stub-native"
14
15EXTRA_OECONF_append = "\
16 --disable-gtk-doc \
17 --disable-gtk-doc-html \
18 --disable-gtk-doc-pdf \
19"
20
21do_configure_prepend () {
22 ( cd ${S}; gtkdocize --docdir ${GTKDOC_DOCDIR} )
23}
24
25inherit pkgconfig
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
new file mode 100644
index 0000000000..789fa38a16
--- /dev/null
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -0,0 +1,62 @@
1FILES_${PN} += "${datadir}/icons/hicolor"
2
3DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-update-icon-cache-native"
4
5gtk_icon_cache_postinst() {
6if [ "x$D" != "x" ]; then
7 $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
8 base_libdir=${base_libdir}
9else
10
11 # Update the pixbuf loaders in case they haven't been registered yet
12 GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
13
14 for icondir in /usr/share/icons/* ; do
15 if [ -d $icondir ] ; then
16 gtk-update-icon-cache -fqt $icondir
17 fi
18 done
19fi
20}
21
22gtk_icon_cache_postrm() {
23if [ "x$D" != "x" ]; then
24 $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
25 base_libdir=${base_libdir}
26else
27 for icondir in /usr/share/icons/* ; do
28 if [ -d $icondir ] ; then
29 gtk-update-icon-cache -qt $icondir
30 fi
31 done
32fi
33}
34
35python populate_packages_append () {
36 packages = d.getVar('PACKAGES', True).split()
37 pkgdest = d.getVar('PKGDEST', True)
38
39 for pkg in packages:
40 icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
41 if not os.path.exists(icon_dir):
42 continue
43
44 bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
45 rdepends = ' ' + d.getVar('MLPREFIX') + "hicolor-icon-theme"
46 d.appendVar('RDEPENDS_%s' % pkg, rdepends)
47
48 bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
49
50 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
51 if not postinst:
52 postinst = '#!/bin/sh\n'
53 postinst += d.getVar('gtk_icon_cache_postinst', True)
54 d.setVar('pkg_postinst_%s' % pkg, postinst)
55
56 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
57 if not postrm:
58 postrm = '#!/bin/sh\n'
59 postrm += d.getVar('gtk_icon_cache_postrm', True)
60 d.setVar('pkg_postrm_%s' % pkg, postrm)
61}
62
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
new file mode 100644
index 0000000000..5b45149080
--- /dev/null
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -0,0 +1,83 @@
1# This class will update the inputmethod module cache for virtual keyboards
2#
3# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the inputmethod modules
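#
# For example, a recipe that splits its input methods into a subpackage
# (hypothetical package name) might set:
#   GTKIMMODULES_PACKAGES = "${PN}-immodules"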
4
5DEPENDS =+ "qemu-native"
6
7inherit qemu
8
9GTKIMMODULES_PACKAGES ?= "${PN}"
10
11gtk_immodule_cache_postinst() {
12if [ "x$D" != "x" ]; then
13 for maj_ver in 2 3; do
14 if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
15 IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
16 ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
17 $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
18 sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
19
20 [ $? -ne 0 ] && exit 1
21 fi
22 done
23
24 exit 0
25fi
26if [ ! -z "`which gtk-query-immodules-2.0`" ]; then
27 gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
28fi
29if [ ! -z "`which gtk-query-immodules-3.0`" ]; then
30 gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
31fi
32}
33
34gtk_immodule_cache_postrm() {
35if [ "x$D" != "x" ]; then
36 for maj_ver in 2 3; do
37 if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
38 IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
39 ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
40 $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
41 sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
42
43 [ $? -ne 0 ] && exit 1
44 fi
45 done
46
47 exit 0
48fi
49if [ ! -z "`which gtk-query-immodules-2.0`" ]; then
50 gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
51fi
52if [ ! -z "`which gtk-query-immodules-3.0`" ]; then
53 gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
54fi
55}
56
57python populate_packages_append () {
58 gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES', True).split()
59
60 for pkg in gtkimmodules_pkgs:
61 bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
62
63 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
64 if not postinst:
65 postinst = '#!/bin/sh\n'
66 postinst += d.getVar('gtk_immodule_cache_postinst', True)
67 d.setVar('pkg_postinst_%s' % pkg, postinst)
68
69 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
70 if not postrm:
71 postrm = '#!/bin/sh\n'
72 postrm += d.getVar('gtk_immodule_cache_postrm', True)
73 d.setVar('pkg_postrm_%s' % pkg, postrm)
74}
75
76python __anonymous() {
77 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
78 gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES')
79 if not gtkimmodules_check:
80 bb_filename = d.getVar('FILE')
81 raise bb.build.FuncFailed("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
82}
83
diff --git a/meta/classes/gummiboot.bbclass b/meta/classes/gummiboot.bbclass
new file mode 100644
index 0000000000..dae19775c3
--- /dev/null
+++ b/meta/classes/gummiboot.bbclass
@@ -0,0 +1,114 @@
1# Copyright (C) 2014 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5# gummiboot.bbclass - equivalent of grub-efi.bbclass
6# Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
7# (images built by bootimg.bbclass or boot-directdisk.bbclass)
8
9do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy"
10do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy"
11
12EFIDIR = "/EFI/BOOT"
13
14GUMMIBOOT_CFG ?= "${S}/loader.conf"
15GUMMIBOOT_ENTRIES ?= ""
16GUMMIBOOT_TIMEOUT ?= "10"
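# e.g. a machine configuration might set (hypothetical value):
#   GUMMIBOOT_TIMEOUT = "5"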
17
18efi_populate() {
19 DEST=$1
20
21 EFI_IMAGE="gummibootia32.efi"
22 DEST_EFI_IMAGE="bootia32.efi"
23 if [ "${TARGET_ARCH}" = "x86_64" ]; then
24 EFI_IMAGE="gummibootx64.efi"
25 DEST_EFI_IMAGE="bootx64.efi"
26 fi
27
28 install -d ${DEST}${EFIDIR}
29    # gummiboot requires these paths for configuration files;
30    # they are not customizable, so there is no point in new vars
31 install -d ${DEST}/loader
32 install -d ${DEST}/loader/entries
33 install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
34 install -m 0644 ${GUMMIBOOT_CFG} ${DEST}/loader/loader.conf
35 for i in ${GUMMIBOOT_ENTRIES}; do
36 install -m 0644 ${i} ${DEST}/loader/entries
37 done
38}
39
40efi_iso_populate() {
41 iso_dir=$1
42 efi_populate $iso_dir
43 mkdir -p ${EFIIMGDIR}/${EFIDIR}
44 cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
45 cp $iso_dir/vmlinuz ${EFIIMGDIR}
46 echo "${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
47 if [ -f "$iso_dir/initrd" ] ; then
48 cp $iso_dir/initrd ${EFIIMGDIR}
49 fi
50}
51
52efi_hddimg_populate() {
53 efi_populate $1
54}
55
56python build_efi_cfg() {
57 s = d.getVar("S", True)
58 labels = d.getVar('LABELS', True)
59 if not labels:
60 bb.debug(1, "LABELS not defined, nothing to do")
61 return
62
63 if labels == []:
64 bb.debug(1, "No labels, nothing to do")
65 return
66
67 cfile = d.getVar('GUMMIBOOT_CFG', True)
68 try:
69 cfgfile = open(cfile, 'w')
70 except OSError:
71        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
72
73 cfgfile.write('# Automatically created by OE\n')
74 cfgfile.write('default %s\n' % (labels.split()[0]))
75 timeout = d.getVar('GUMMIBOOT_TIMEOUT', True)
76 if timeout:
77 cfgfile.write('timeout %s\n' % timeout)
78 else:
79 cfgfile.write('timeout 10\n')
80 cfgfile.close()
81
82 for label in labels.split():
83 localdata = d.createCopy()
84
85 overrides = localdata.getVar('OVERRIDES', True)
86 if not overrides:
87 raise bb.build.FuncFailed('OVERRIDES not defined')
88
89 entryfile = "%s/%s.conf" % (s, label)
90 d.appendVar("GUMMIBOOT_ENTRIES", " " + entryfile)
91 try:
92 entrycfg = open(entryfile, "w")
93 except OSError:
94            raise bb.build.FuncFailed('Unable to open %s' % (entryfile))
95 localdata.setVar('OVERRIDES', label + ':' + overrides)
96 bb.data.update_data(localdata)
97
98 entrycfg.write('title %s\n' % label)
99 entrycfg.write('linux /vmlinuz\n')
100
101 append = localdata.getVar('APPEND', True)
102 initrd = localdata.getVar('INITRD', True)
103
104 if initrd:
105 entrycfg.write('initrd /initrd\n')
106 lb = label
107 if label == "install":
108 lb = "install-efi"
109 entrycfg.write('options LABEL=%s ' % lb)
110 if append:
111 entrycfg.write('%s' % append)
112 entrycfg.write('\n')
113 entrycfg.close()
114}
diff --git a/meta/classes/gzipnative.bbclass b/meta/classes/gzipnative.bbclass
new file mode 100644
index 0000000000..326cbbb6f6
--- /dev/null
+++ b/meta/classes/gzipnative.bbclass
@@ -0,0 +1,5 @@
1EXTRANATIVEPATH += "pigz-native gzip-native"
2DEPENDS += "gzip-native"
3
4# tar may get run by do_unpack or do_populate_lic which could call gzip
5do_unpack[depends] += "gzip-native:do_populate_sysroot"
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
new file mode 100644
index 0000000000..2f9e3cf8ef
--- /dev/null
+++ b/meta/classes/icecc.bbclass
@@ -0,0 +1,332 @@
1# IceCream distributed compiling support
2#
3# Stages directories with symlinks from gcc/g++ to icecc, for both
4# native and cross compilers. During each configure or compile, the
5# directories are added at the head of the PATH list and ICECC_CXX
6# and ICECC_CC are set.
7#
8# For the cross compiler, creates a tar.gz of our toolchain and sets
9# ICECC_VERSION accordingly.
10#
11# The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
12# necessary environment tar.gz file to be used by the remote machines.
13# It also supports meta-toolchain generation
14#
15# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which'
16# but nothing is sure ;)
17#
18# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user;
19# otherwise the default one provided by icecc-create-env.bb will be used
20# (NOTE that this is a modified version of the script, *not the one that comes with icecc*).
21#
22# Users can specify that specific packages, or packages belonging to a class, should not use icecc to distribute
23# compile jobs to remote machines but should be handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL
24# with the appropriate values in local.conf. In addition, the user can force icecc to be enabled for packages
25# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
26#
27#########################################################################################
28# Error checking is kept to a minimum, so double check any parameters you pass to the class
29#########################################################################################
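#
# Example local.conf settings (hypothetical values):
#   ICECC_PATH = "/usr/bin/icecc"
#   ICECC_PARALLEL_MAKE = "-j 24"
#   ICECC_USER_CLASS_BL = "image"
#   ICECC_USER_PACKAGE_BL = "qemu-native"
#   ICECC_USER_PACKAGE_WL = "quilt-native"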
30
31BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC"
32
33ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
34
35def icecc_dep_prepend(d):
36 # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
37 # we need that built is the responsibility of the patch function / class, not
38 # the application.
39 if not d.getVar('INHIBIT_DEFAULT_DEPS'):
40 return "icecc-create-env-native"
41 return ""
42
43DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
44
45def get_cross_kernel_cc(bb,d):
46 kernel_cc = d.getVar('KERNEL_CC')
47
48 # evaluate the expression by the shell if necessary
49 if '`' in kernel_cc or '$(' in kernel_cc:
50 kernel_cc = os.popen("echo %s" % kernel_cc).read()[:-1]
51
52 kernel_cc = d.expand(kernel_cc)
53 kernel_cc = kernel_cc.replace('ccache', '').strip()
54 kernel_cc = kernel_cc.split(' ')[0]
55 kernel_cc = kernel_cc.strip()
56 return kernel_cc
57
58def get_icecc(d):
59 return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc")
60
61def create_path(compilers, bb, d):
62 """
63 Create Symlinks for the icecc in the staging directory
64 """
65 staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
66 if icc_is_kernel(bb, d):
67 staging += "-kernel"
68
69 #check if the icecc path is set by the user
70 icecc = get_icecc(d)
71
72 # Create the dir if necessary
73 try:
74 os.stat(staging)
75 except:
76 try:
77 os.makedirs(staging)
78 except:
79 pass
80
81 for compiler in compilers:
82 gcc_path = os.path.join(staging, compiler)
83 try:
84 os.stat(gcc_path)
85 except:
86 try:
87 os.symlink(icecc, gcc_path)
88 except:
89 pass
90
91 return staging
92
93def use_icc(bb,d):
94 if d.getVar('ICECC_DISABLED') == "1":
95 # don't even try it, when explicitly disabled
96 return "no"
97
98 # allarch recipes don't use compiler
99 if icc_is_allarch(bb, d):
100 return "no"
101
102 pn = d.getVar('PN', True)
103
104 system_class_blacklist = []
105 user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
106 package_class_blacklist = system_class_blacklist + user_class_blacklist
107
108 for black in package_class_blacklist:
109 if bb.data.inherits_class(black, d):
110 bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
111 return "no"
112
113    # the "system" recipe blacklist contains a list of packages that cannot distribute compile tasks
114    # for one reason or another
115 # this is the old list (which doesn't seem to be valid anymore, because I was able to build
116 # all these with icecc enabled)
117 # system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
118    # when adding a new entry, please document why (how it failed) so that we can re-evaluate it later
119    # e.g. when there is a new version
120 system_package_blacklist = []
121 user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
122 user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
123 package_blacklist = system_package_blacklist + user_package_blacklist
124
125 if pn in package_blacklist:
126 bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
127 return "no"
128
129 if pn in user_package_whitelist:
130 bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
131 return "yes"
132
133 if d.getVar('PARALLEL_MAKE') == "":
134 bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn)
135 return "no"
136
137 return "yes"
138
139def icc_is_allarch(bb, d):
140 return d.getVar("PACKAGE_ARCH") == "all"
141
142def icc_is_kernel(bb, d):
143 return \
144        bb.data.inherits_class("kernel", d)
145
146def icc_is_native(bb, d):
147 return \
148 bb.data.inherits_class("cross", d) or \
149        bb.data.inherits_class("native", d)
150
151# Don't pollute allarch signatures with TARGET_FPU
152icc_version[vardepsexclude] += "TARGET_FPU"
153def icc_version(bb, d):
154 if use_icc(bb, d) == "no":
155 return ""
156
157 parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
158 if not d.getVar('PARALLEL_MAKE') == "" and parallel:
159 d.setVar("PARALLEL_MAKE", parallel)
160
161 if icc_is_native(bb, d):
162 archive_name = "local-host-env"
163 elif d.expand('${HOST_PREFIX}') == "":
164 bb.fatal(d.expand("${PN}"), " NULL prefix")
165 else:
166 prefix = d.expand('${HOST_PREFIX}' )
167 distro = d.expand('${DISTRO}')
168 target_sys = d.expand('${TARGET_SYS}')
169 float = d.getVar('TARGET_FPU') or "hard"
170 archive_name = prefix + distro + "-" + target_sys + "-" + float
171 if icc_is_kernel(bb, d):
172 archive_name += "-kernel"
173
174 import socket
175 ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
176 tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
177
178 return tar_file
179
180def icc_path(bb,d):
181 if use_icc(bb, d) == "no":
182 # don't create unnecessary directories when icecc is disabled
183 return
184
185 if icc_is_kernel(bb, d):
186 return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
187
188 else:
189 prefix = d.expand('${HOST_PREFIX}')
190 return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
191
192def icc_get_external_tool(bb, d, tool):
193 external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
194 target_prefix = d.expand('${TARGET_PREFIX}')
195 return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
196
197# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
198icc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
199def icc_get_tool(bb, d, tool):
200 if icc_is_native(bb, d):
201 return bb.utils.which(os.getenv("PATH"), tool)
202 elif icc_is_kernel(bb, d):
203 return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
204 else:
205 ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
206 target_sys = d.expand('${TARGET_SYS}')
207 tool_bin = os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
208 if os.path.isfile(tool_bin):
209 return tool_bin
210 else:
211 external_tool_bin = icc_get_external_tool(bb, d, tool)
212 if os.path.isfile(external_tool_bin):
213 return external_tool_bin
214 else:
215 return ""
216
217def icc_get_and_check_tool(bb, d, tool):
218 # Check that g++ or gcc is not a symbolic link to icecc binary in
219 # PATH or icecc-create-env script will silently create an invalid
220 # compiler environment package.
221 t = icc_get_tool(bb, d, tool)
222 if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
223 bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
224 return ""
225 else:
226 return t
227
228wait_for_file() {
229 local TIME_ELAPSED=0
230 local FILE_TO_TEST=$1
231 local TIMEOUT=$2
232 until [ -f "$FILE_TO_TEST" ]
233 do
234 TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
235 if [ $TIME_ELAPSED -gt $TIMEOUT ]
236 then
237 return 1
238 fi
239 sleep 1
240 done
241}
242
243def set_icecc_env():
244 # dummy python version of set_icecc_env
245 return
246
247set_icecc_env() {
248 if [ "${@use_icc(bb, d)}" = "no" ]
249 then
250 return
251 fi
252 ICECC_VERSION="${@icc_version(bb, d)}"
253 if [ "x${ICECC_VERSION}" = "x" ]
254 then
255 bbwarn "Cannot use icecc: could not get ICECC_VERSION"
256 return
257 fi
258
259 ICE_PATH="${@icc_path(bb, d)}"
260 if [ "x${ICE_PATH}" = "x" ]
261 then
262 bbwarn "Cannot use icecc: could not get ICE_PATH"
263 return
264 fi
265
266 ICECC_CC="${@icc_get_and_check_tool(bb, d, "gcc")}"
267 ICECC_CXX="${@icc_get_and_check_tool(bb, d, "g++")}"
268    # cannot use icc_get_and_check_tool here because it assumes 'as' without the target_sys prefix
269 ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
270 if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
271 then
272 bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
273 return
274 fi
275
276 ICE_VERSION=`$ICECC_CC -dumpversion`
277 ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
278 if [ ! -x "${ICECC_ENV_EXEC}" ]
279 then
280 bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
281 return
282 fi
283
284 ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
285    # for target recipes this should return something like:
286    # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
287    # and just "as" for native; if it returns "as" in the current directory (for whatever reason), use "as" from PATH
288 if [ "`dirname "${ICECC_AS}"`" = "." ]
289 then
290 ICECC_AS="${ICECC_WHICH_AS}"
291 fi
292
293 if [ ! -f "${ICECC_VERSION}.done" ]
294 then
295 mkdir -p "`dirname "${ICECC_VERSION}"`"
296
297 # the ICECC_VERSION generation step must be locked by a mutex
298 # in order to prevent race conditions
299 if flock -n "${ICECC_VERSION}.lock" \
300 ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
301 then
302 touch "${ICECC_VERSION}.done"
303    elif ! wait_for_file "${ICECC_VERSION}.done" 30
304    then
305        # locking failed and ${ICECC_VERSION}.done did not appear within the timeout
306        bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
307        return
308    fi
309 fi
310
311 export ICECC_VERSION ICECC_CC ICECC_CXX
312 export PATH="$ICE_PATH:$PATH"
313 export CCACHE_PATH="$PATH"
314
315 bbnote "Using icecc"
316}
317
318do_configure_prepend() {
319 set_icecc_env
320}
321
322do_compile_prepend() {
323 set_icecc_env
324}
325
326do_compile_kernelmodules_prepend() {
327 set_icecc_env
328}
329
330do_install_prepend() {
331 set_icecc_env
332}
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
new file mode 100644
index 0000000000..7b770fb353
--- /dev/null
+++ b/meta/classes/image-live.bbclass
@@ -0,0 +1,18 @@
1
2AUTO_SYSLINUXCFG = "1"
3INITRD_IMAGE ?= "core-image-minimal-initramfs"
4INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
5SYSLINUX_ROOT = "root=/dev/ram0"
6SYSLINUX_TIMEOUT ?= "10"
7SYSLINUX_LABELS ?= "boot install"
8LABELS_append = " ${SYSLINUX_LABELS} "
9
10ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
11
12do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs"
13do_bootimg[depends] += "${PN}:do_rootfs"
14
15inherit bootimg
16
17IMAGE_TYPEDEP_live = "ext3"
18IMAGE_TYPES_MASKED += "live"
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
new file mode 100644
index 0000000000..c455a8e2d4
--- /dev/null
+++ b/meta/classes/image-mklibs.bbclass
@@ -0,0 +1,71 @@
1do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
4
5mklibs_optimize_image_doit() {
6 rm -rf ${WORKDIR}/mklibs
7 mkdir -p ${WORKDIR}/mklibs/dest
8 cd ${IMAGE_ROOTFS}
9 du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
10 for i in `find .`; do file $i; done \
11 | grep ELF \
12 | grep "LSB *executable" \
13 | grep "dynamically linked" \
14 | sed "s/:.*//" \
15 | sed "s+^\./++" \
16 > ${WORKDIR}/mklibs/executables.list
17
18 case ${TARGET_ARCH} in
19 powerpc | mips | mipsel | microblaze )
20 dynamic_loader="${base_libdir}/ld.so.1"
21 ;;
22 powerpc64)
23 dynamic_loader="${base_libdir}/ld64.so.1"
24 ;;
25 x86_64)
26 dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
27 ;;
28 i586 )
29 dynamic_loader="${base_libdir}/ld-linux.so.2"
30 ;;
31 arm )
32 dynamic_loader="${base_libdir}/ld-linux.so.3"
33 ;;
34 * )
35 dynamic_loader="/unknown_dynamic_linker"
36 ;;
37 esac
38
39 mklibs -v \
40 --ldlib ${dynamic_loader} \
41 --libdir ${baselib} \
42 --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
43 --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
44 --root ${IMAGE_ROOTFS} \
45 --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
46 -d ${WORKDIR}/mklibs/dest \
47 `cat ${WORKDIR}/mklibs/executables.list`
48
49 cd ${WORKDIR}/mklibs/dest
50 for i in *
51 do
52 cp $i `find ${IMAGE_ROOTFS} -name $i`
53 done
54
55 cd ${IMAGE_ROOTFS}
56 du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
57
58 echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
59 echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
60}
61
62mklibs_optimize_image() {
63 for img in ${MKLIBS_OPTIMIZED_IMAGES}
64 do
65 if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
66 then
67 mklibs_optimize_image_doit
68 break
69 fi
70 done
71}
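
# MKLIBS_OPTIMIZED_IMAGES selects the images to optimize; e.g. in local.conf
# (hypothetical image name):
#   MKLIBS_OPTIMIZED_IMAGES = "core-image-minimal"
# or "all" to optimize every image.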
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
new file mode 100644
index 0000000000..d4bb3aec39
--- /dev/null
+++ b/meta/classes/image-prelink.bbclass
@@ -0,0 +1,33 @@
1do_rootfs[depends] += "prelink-native:do_populate_sysroot"
2
3IMAGE_PREPROCESS_COMMAND += "prelink_image; "
4
5prelink_image () {
6# export PSEUDO_DEBUG=4
7# /bin/env | /bin/grep PSEUDO
8# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
9# echo "LD_PRELOAD=$LD_PRELOAD"
10
11 pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
12 echo "Size before prelinking $pre_prelink_size."
13
14 # We need a prelink conf on the filesystem, add one if it's missing
15 if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
16 cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \
17 ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
18 dummy_prelink_conf=true;
19 else
20 dummy_prelink_conf=false;
21 fi
22
23 # prelink!
24 ${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf
25
26 # Remove the prelink.conf if we had to add it.
27 if [ "$dummy_prelink_conf" = "true" ]; then
28 rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
29 fi
30
31    post_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
32    echo "Size after prelinking $post_prelink_size."
33}
diff --git a/meta/classes/image-swab.bbclass b/meta/classes/image-swab.bbclass
new file mode 100644
index 0000000000..89318560db
--- /dev/null
+++ b/meta/classes/image-swab.bbclass
@@ -0,0 +1,94 @@
1HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
2SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
3SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
4TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
5TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
6
7SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
8BB_DEFAULT_TASK = "generate_swabber_report"
9
10# Several recipes don't build with parallel make when run under strace
11# Ideally these should be fixed but as a temporary measure disable parallel
12# builds for troublesome recipes
13PARALLEL_MAKE_pn-openssl = ""
14PARALLEL_MAKE_pn-glibc = ""
15PARALLEL_MAKE_pn-glib-2.0 = ""
16PARALLEL_MAKE_pn-libxml2 = ""
17PARALLEL_MAKE_pn-readline = ""
18PARALLEL_MAKE_pn-util-linux = ""
19PARALLEL_MAKE_pn-binutils = ""
20PARALLEL_MAKE_pn-bison = ""
21PARALLEL_MAKE_pn-cmake = ""
22PARALLEL_MAKE_pn-elfutils = ""
23PARALLEL_MAKE_pn-gcc = ""
24PARALLEL_MAKE_pn-gcc-runtime = ""
25PARALLEL_MAKE_pn-m4 = ""
26PARALLEL_MAKE_pn-opkg = ""
27PARALLEL_MAKE_pn-pkgconfig = ""
28PARALLEL_MAKE_pn-prelink = ""
29PARALLEL_MAKE_pn-rpm = ""
30PARALLEL_MAKE_pn-tcl = ""
31PARALLEL_MAKE_pn-beecrypt = ""
32PARALLEL_MAKE_pn-curl = ""
33PARALLEL_MAKE_pn-gmp = ""
34PARALLEL_MAKE_pn-libmpc = ""
35PARALLEL_MAKE_pn-libxslt = ""
36PARALLEL_MAKE_pn-lzo = ""
37PARALLEL_MAKE_pn-popt = ""
38PARALLEL_MAKE_pn-linux-wrs = ""
39PARALLEL_MAKE_pn-libgcrypt = ""
40PARALLEL_MAKE_pn-gpgme = ""
41PARALLEL_MAKE_pn-udev = ""
42PARALLEL_MAKE_pn-gnutls = ""
43
44python() {
45 # NOTE: It might be useful to detect host infection on native and cross
46 # packages but as it turns out to be pretty hard to do this for all native
47 # and cross packages which aren't swabber-native or one of its dependencies
48 # I have ignored them for now...
49 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
50 deps = (d.getVarFlag('do_setscene', 'depends') or "").split()
51 deps.append('strace-native:do_populate_sysroot')
52 d.setVarFlag('do_setscene', 'depends', " ".join(deps))
53 logdir = d.expand("${TRACE_LOGDIR}")
54 bb.utils.mkdirhier(logdir)
55 else:
56 d.setVar('STRACEFUNC', '')
57}
58
59STRACEPID = "${@os.getpid()}"
60STRACEFUNC = "imageswab_attachstrace"
61
62do_configure[prefuncs] += "${STRACEFUNC}"
63do_compile[prefuncs] += "${STRACEFUNC}"
64
65imageswab_attachstrace () {
66 STRACE=`which strace`
67
68 if [ -x "$STRACE" ]; then
69 swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
70 fi
71}
72
73do_generate_swabber_report () {
74
75 update_distro ${HOST_DATA}
76
77 # Swabber can't create the directory for us
78 mkdir -p ${SWABBER_REPORT}
79
80 REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
81
82    if [ -n "`which ccache`" ] ; then
83 CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
84 fi
85
86 if [ "$(ls -A ${HOST_DATA})" ]; then
87 echo "Generating swabber report"
88 swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
89 else
90 echo "No host data, cannot generate swabber report."
91 fi
92}
93addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
94do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"
diff --git a/meta/classes/image-vmdk.bbclass b/meta/classes/image-vmdk.bbclass
new file mode 100644
index 0000000000..77b7facd41
--- /dev/null
+++ b/meta/classes/image-vmdk.bbclass
@@ -0,0 +1,35 @@
1
2#NOISO = "1"
3
4SYSLINUX_ROOT ?= "root=/dev/sda2"
5SYSLINUX_PROMPT ?= "0"
6SYSLINUX_TIMEOUT ?= "10"
7SYSLINUX_LABELS = "boot"
8LABELS_append = " ${SYSLINUX_LABELS} "
9
10# need to define the dependency and the ROOTFS for directdisk
11do_bootdirectdisk[depends] += "${PN}:do_rootfs"
12ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
13
14# creating VMDK relies on having a live hddimg so ensure we
15# inherit it here.
16#inherit image-live
17inherit boot-directdisk
18
19IMAGE_TYPEDEP_vmdk = "ext3"
20IMAGE_TYPES_MASKED += "vmdk"
21
22create_vmdk_image () {
23 qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk
24 ln -sf ${IMAGE_NAME}.vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vmdk
25}
26
27python do_vmdkimg() {
28 bb.build.exec_func('create_vmdk_image', d)
29}
30
31#addtask vmdkimg after do_bootimg before do_build
32addtask vmdkimg after do_bootdirectdisk before do_build
33
34do_vmdkimg[depends] += "qemu-native:do_populate_sysroot"
35
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
new file mode 100644
index 0000000000..1c0fda7d60
--- /dev/null
+++ b/meta/classes/image.bbclass
@@ -0,0 +1,448 @@
1inherit rootfs_${IMAGE_PKGTYPE}
2
3inherit populate_sdk_base
4
5TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
6TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
7POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
8
9inherit gzipnative
10
11LICENSE = "MIT"
12PACKAGES = ""
13DEPENDS += "${MLPREFIX}qemuwrapper-cross ${MLPREFIX}depmodwrapper-cross"
14RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
15RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
16
17INHIBIT_DEFAULT_DEPS = "1"
18
19TESTIMAGECLASS = "${@base_conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
20inherit ${TESTIMAGECLASS}
21
22# IMAGE_FEATURES may contain any available package group
23IMAGE_FEATURES ?= ""
24IMAGE_FEATURES[type] = "list"
25IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs"
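# e.g. an image recipe might enable (features defined in this class; hypothetical choice):
#   IMAGE_FEATURES += "splash package-management debug-tweaks"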
26
27# rootfs bootstrap install
28ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
29
30# packages to install from features
31FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
32FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
33
34# Define some very basic feature package groups
35FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
36SPLASH ?= "psplash"
37FEATURE_PACKAGES_splash = "${SPLASH}"
38
39IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
40
41def check_image_features(d):
42 valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems', True) or "").split()
43 valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
44 for var in d:
45 if var.startswith("PACKAGE_GROUP_"):
46 bb.warn("PACKAGE_GROUP is deprecated, please use FEATURE_PACKAGES instead")
47 valid_features.append(var[14:])
48 elif var.startswith("FEATURE_PACKAGES_"):
49 valid_features.append(var[17:])
50 valid_features.sort()
51
52 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
53 for feature in features:
54 if feature not in valid_features:
55 bb.fatal("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
56
57IMAGE_INSTALL ?= ""
58IMAGE_INSTALL[type] = "list"
59export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
60PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
61
62# Images are generally built explicitly, do not need to be part of world.
63EXCLUDE_FROM_WORLD = "1"
64
65USE_DEVFS ?= "1"
66
67PID = "${@os.getpid()}"
68
69PACKAGE_ARCH = "${MACHINE_ARCH}"
70
71LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
72LDCONFIGDEPEND_libc-uclibc = ""
73LDCONFIGDEPEND_libc-musl = ""
74
75do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
76do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
77do_rootfs[recrdeptask] += "do_packagedata"
78
79def command_variables(d):
80 return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
81 'IMAGE_PREPROCESS_COMMAND','ROOTFS_POSTPROCESS_COMMAND','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',
82 'RPM_POSTPROCESS_COMMANDS']
83
84python () {
85 variables = command_variables(d)
86 for var in variables:
87 if d.getVar(var):
88 d.setVarFlag(var, 'func', '1')
89}
90
91def rootfs_variables(d):
92 from oe.rootfs import variable_depends
93 variables = ['IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPEDEP_','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
94 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS','SDK_OS',
95 'SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT','SDKTARGETSYSROOT','MULTILIBRE_ALLOW_REP',
96 'MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
97 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','BUILDNAME','USE_DEVFS',
98 'STAGING_KERNEL_DIR','COMPRESSIONTYPES']
99 variables.extend(command_variables(d))
100 variables.extend(variable_depends(d))
101 return " ".join(variables)
102
103do_rootfs[vardeps] += "${@rootfs_variables(d)}"
104
105do_build[depends] += "virtual/kernel:do_deploy"
106
107def build_live(d):
108 if bb.utils.contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg
109 d.setVar('NOISO', bb.utils.contains('IMAGE_FSTYPES', "iso", "0", "1", d))
110 d.setVar('NOHDD', bb.utils.contains('IMAGE_FSTYPES', "hddimg", "0", "1", d))
111 if d.getVar('NOISO', True) == "0" or d.getVar('NOHDD', True) == "0":
112 return "image-live"
113 return ""
114 return "image-live"
115
116IMAGE_TYPE_live = "${@build_live(d)}"
117
118inherit ${IMAGE_TYPE_live}
119IMAGE_TYPE_vmdk = '${@bb.utils.contains("IMAGE_FSTYPES", "vmdk", "image-vmdk", "", d)}'
120inherit ${IMAGE_TYPE_vmdk}
121
122python () {
123 deps = " " + imagetypes_getdepends(d)
124 d.appendVarFlag('do_rootfs', 'depends', deps)
125
126 deps = ""
127 for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
128 deps += " %s:do_populate_sysroot" % dep
129 d.appendVarFlag('do_build', 'depends', deps)
130
131 #process IMAGE_FEATURES, we must do this before runtime_mapping_rename
132 #Check for replaces image features
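    # e.g. a feature can declare that it supersedes another one (hypothetical pairing):
    #   IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"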
133 features = set(oe.data.typed_value('IMAGE_FEATURES', d))
134 remain_features = features.copy()
135 for feature in features:
136 replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature, True) or "").split())
137 remain_features -= replaces
138
139 #Check for conflict image features
140 for feature in remain_features:
141 conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature, True) or "").split())
142 temp = conflicts & remain_features
143 if temp:
144 bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN', True), feature, ' '.join(list(temp))))
145
146 d.setVar('IMAGE_FEATURES', ' '.join(list(remain_features)))
147
148 check_image_features(d)
149 initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
150 if initramfs_image != "":
151 d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" % d.getVar('PN', True))
152 d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_rootfs" % initramfs_image)
153}
154
155IMAGE_CLASSES += "image_types"
156inherit ${IMAGE_CLASSES}
157
158IMAGE_POSTPROCESS_COMMAND ?= ""
159MACHINE_POSTPROCESS_COMMAND ?= ""
160# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
161ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "ssh_allow_empty_password; ", "",d)}'
162# Enable postinst logging if debug-tweaks is enabled
163ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "debug-tweaks", "postinst_enable_logging; ", "",d)}'
164# Write manifest
165IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
166ROOTFS_POSTPROCESS_COMMAND =+ "write_image_manifest ; "
167# Set default postinst log file
168POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
169# Set default target for systemd images
170SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
171ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
172
173# some default locales
174IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
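# images can override this, e.g. (hypothetical selection):
#   IMAGE_LINGUAS = "en-us pt-br"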
175
176LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
177
178# Prefer image, but use the fallback files for lookups if the image ones
179# aren't yet available.
180PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
181
182do_rootfs[dirs] = "${TOPDIR}"
183do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
184do_rootfs[cleandirs] += "${S}"
185
186# Must call real_do_rootfs() from inside here, rather than as a separate
187# task, so that we have a single fakeroot context for the whole process.
188do_rootfs[umask] = "022"
189
190# A hook function to support read-only-rootfs IMAGE_FEATURES
191# Currently, it only supports sysvinit systems.
192read_only_rootfs_hook () {
193 # Tweak the mount option and fs_passno for rootfs in fstab
194 sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
195
196 if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
197 # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
198 if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
199 sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
200 fi
201 # Run populate-volatile.sh at rootfs time to set up basic files
202 # and directories to support read-only rootfs.
203 if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
204 ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
205 fi
206 # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
207 # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
208 # and the keys under /var/run/ssh.
209 if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
210 if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
211 echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
212 echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
213 else
214 echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
215 echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
216 fi
217 fi
218 fi
219
220 if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
221 # Update user database files so that services don't fail for a read-only systemd system
222 for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
223 [ -e $conffile ] || continue
224 grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
225 if [ "$type" = "u" ]; then
226 useradd_params=""
227 [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
228 [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
229 useradd_params="$useradd_params --system $name"
230 eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
231 elif [ "$type" = "g" ]; then
232 groupadd_params=""
233 [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
234 groupadd_params="$groupadd_params --system $name"
235 eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
236 fi
237 done
238 done
239 fi
240}
241
242PACKAGE_EXCLUDE ??= ""
243PACKAGE_EXCLUDE[type] = "list"
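# e.g. to keep a package out of the image even if something pulls it in
# (hypothetical package name):
#   PACKAGE_EXCLUDE = "udev-cache"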
244
245python rootfs_process_ignore() {
246 excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
247 inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
248 inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
249
250 d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
251 d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
252
253 for pkg in excl_pkgs:
254 if pkg in inst_pkgs:
255 bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_pkgs))
256 inst_pkgs.remove(pkg)
257
258 if pkg in inst_attempt_pkgs:
259            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN', True), inst_attempt_pkgs))
260 inst_attempt_pkgs.remove(pkg)
261
262 d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
263 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
264}
265do_rootfs[prefuncs] += "rootfs_process_ignore"
266
267# We have to delay the runtime_mapping_rename until just before rootfs runs;
268# otherwise, the multilib renaming could step in and squash any fixups that
269# may have occurred.
270python rootfs_runtime_mapping() {
271 pn = d.getVar('PN', True)
272 runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
273 runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
274 runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
275}
276do_rootfs[prefuncs] += "rootfs_runtime_mapping"
277
278fakeroot python do_rootfs () {
279 from oe.rootfs import create_rootfs
280 from oe.image import create_image
281 from oe.manifest import create_manifest
282
283 # generate the initial manifest
284 create_manifest(d)
285
286 # generate rootfs
287 create_rootfs(d)
288
289 # generate final images
290 create_image(d)
291}
292
293insert_feed_uris () {
294
295 echo "Building feeds for [${DISTRO}].."
296
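    # each FEED_URIS entry has the form "name##uri", e.g. (hypothetical feed):
    #   FEED_URIS = "base##http://example.com/feeds/base"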
297 for line in ${FEED_URIS}
298 do
299 # strip leading and trailing spaces/tabs, then split into name and uri
300 line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
301 feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
302 feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
303
304 echo "Added $feed_name feed with URL $feed_uri"
305
306 # insert new feed-sources
307 echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
308 done
309}
310
311MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
312MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
313MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
314
315# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
316zap_empty_root_password () {
317 if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
318 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
319 elif [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
320 sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
321 fi
322}
323
324# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
325ssh_allow_empty_password () {
326 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
327 sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
328 sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
329 fi
330
331 if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
332 if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
333 if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
334 sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
335 fi
336 else
337 printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
338 fi
339 fi
340
341 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
342 sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
343 fi
344}
345
346# Disable DNS lookups; SSH_DISABLE_DNS_LOOKUP can be overridden to allow
347# distros to choose not to take this change
348SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
349ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
350ssh_disable_dns_lookup () {
351 if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
352 sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
353 fi
354}
355
356# Enable postinst logging if debug-tweaks is enabled
357postinst_enable_logging () {
358 mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
359 echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
360 echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
361}
362
363# Modify systemd default target
364set_systemd_default_target () {
365 if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
366 ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
367 fi
368}
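# A sketch of the expected usage (hypothetical target name): setting
#   SYSTEMD_DEFAULT_TARGET = "multi-user.target"
# makes the function above point ${sysconfdir}/systemd/system/default.target
# at ${systemd_unitdir}/system/multi-user.target inside the image.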
369
370# Turn a symlinked /sbin/init into a regular file
371remove_init_link () {
372 if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
373 LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
374 rm ${IMAGE_ROOTFS}/sbin/init
375 cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
376 fi
377}
378
379make_zimage_symlink_relative () {
380 if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
381 (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
382 fi
383}
384
385python write_image_manifest () {
386 from oe.rootfs import image_list_installed_packages
387 with open(d.getVar('IMAGE_MANIFEST', True), 'w+') as image_manifest:
388 image_manifest.write(image_list_installed_packages(d, 'ver'))
389}
390
391# Make login manager(s) enable automatic login.
392# Useful for devices where we do not want to log in at all (e.g. phones)
393set_image_autologin () {
394 sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin
395}
396
397# Can be used to create /etc/timestamp during image construction to give a reasonably
398# sane default time setting
399rootfs_update_timestamp () {
400 date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
401}
402
403# Prevent X from being started
404rootfs_no_x_startup () {
405 if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
406 chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
407 fi
408}
409
410rootfs_trim_schemas () {
411 for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
412 do
413 # Need this in case no files exist
414 if [ -e $schema ]; then
415 oe-trim-schemas $schema > $schema.new
416 mv $schema.new $schema
417 fi
418 done
419}
420
421# Make any absolute links in a sysroot relative
422rootfs_sysroot_relativelinks () {
423 sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
424}
425
426do_fetch[noexec] = "1"
427do_unpack[noexec] = "1"
428do_patch[noexec] = "1"
429do_configure[noexec] = "1"
430do_compile[noexec] = "1"
431do_install[noexec] = "1"
432do_populate_sysroot[noexec] = "1"
433do_package[noexec] = "1"
434do_package_qa[noexec] = "1"
435do_packagedata[noexec] = "1"
436do_package_write_ipk[noexec] = "1"
437do_package_write_deb[noexec] = "1"
438do_package_write_rpm[noexec] = "1"
439
440addtask rootfs before do_build
441# Allow the kernel to be repacked with the initramfs and boot image file as a single file
442do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
443do_bundle_initramfs[nostamp] = "1"
444do_bundle_initramfs[noexec] = "1"
445do_bundle_initramfs () {
446 :
447}
448addtask bundle_initramfs after do_rootfs
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
new file mode 100644
index 0000000000..c7da4c3ed8
--- /dev/null
+++ b/meta/classes/image_types.bbclass
@@ -0,0 +1,163 @@
1
2# The default alignment of the rootfs size is 1KiB. If you are using the
3# SD card emulation of a QEMU system simulator, you may set this value to
4# 2048 (2MiB alignment).
5IMAGE_ROOTFS_ALIGNMENT ?= "1"
6
7def imagetypes_getdepends(d):
8 def adddep(depstr, deps):
9 for i in (depstr or "").split():
10 if i not in deps:
11 deps.append(i)
12
13 deps = []
14 ctypes = d.getVar('COMPRESSIONTYPES', True).split()
15 for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
16 if type == "vmdk" or type == "live" or type == "iso" or type == "hddimg":
17 type = "ext3"
18 basetype = type
19 for ctype in ctypes:
20 if type.endswith("." + ctype):
21 basetype = type[:-len("." + ctype)]
22 adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
23 break
24 adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
25
26 depstr = ""
27 for dep in deps:
28 depstr += " " + dep + ":do_populate_sysroot"
29 return depstr
30
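# An illustrative walkthrough (hypothetical configuration): with
#   IMAGE_FSTYPES = "ext4.gz ubi"
# the function strips the ".gz" suffix to find basetype "ext4", then collects
# COMPRESS_DEPENDS_gz (empty), IMAGE_DEPENDS_ext4 and IMAGE_DEPENDS_ubi, and
# returns " e2fsprogs-native:do_populate_sysroot mtd-utils-native:do_populate_sysroot".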
31
32XZ_COMPRESSION_LEVEL ?= "-e -6"
33XZ_INTEGRITY_CHECK ?= "crc32"
34XZ_THREADS ?= "-T 0"
35
36JFFS2_SUM_EXTRA_ARGS ?= ""
37IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 ${EXTRA_IMAGECMD}"
38
39IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}"
40
41oe_mkext234fs () {
42 fstype=$1
43 extra_imagecmd=""
44
45 if [ $# -gt 1 ]; then
46 shift
47 extra_imagecmd=$@
48 fi
49
50 # Create a sparse image block
51 dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype seek=$ROOTFS_SIZE count=0 bs=1k
52 mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype -d ${IMAGE_ROOTFS}
53}
54
55IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
56IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
57IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
58
59IMAGE_CMD_btrfs () {
60 touch ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
61 mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
62}
63
64IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
65IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
66IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
67IMAGE_CMD_tar = "tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar -C ${IMAGE_ROOTFS} ."
68
69do_rootfs[cleandirs] += "${WORKDIR}/cpio_append"
70IMAGE_CMD_cpio () {
71 (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
72 if [ ! -L ${IMAGE_ROOTFS}/init -a ! -e ${IMAGE_ROOTFS}/init ]; then
73 if [ -L ${IMAGE_ROOTFS}/sbin/init -o -e ${IMAGE_ROOTFS}/sbin/init ]; then
74 ln -sf /sbin/init ${WORKDIR}/cpio_append/init
75 else
76 touch ${WORKDIR}/cpio_append/init
77 fi
78 (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
79 fi
80}
81
82ELF_KERNEL ?= "${STAGING_DIR_HOST}/usr/src/kernel/${KERNEL_IMAGETYPE}"
83ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
84
85IMAGE_CMD_elf () {
86 test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf
87 mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
88}
89IMAGE_TYPEDEP_elf = "cpio.gz"
90
91UBI_VOLNAME ?= "${MACHINE}-rootfs"
92
93IMAGE_CMD_ubi () {
94 echo \[ubifs\] > ubinize.cfg
95 echo mode=ubi >> ubinize.cfg
96 echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs >> ubinize.cfg
97 echo vol_id=0 >> ubinize.cfg
98 echo vol_type=dynamic >> ubinize.cfg
99 echo vol_name=${UBI_VOLNAME} >> ubinize.cfg
100 echo vol_flags=autoresize >> ubinize.cfg
101 mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}
102 ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg
103}
104IMAGE_TYPEDEP_ubi = "ubifs"
105
106IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
107
108EXTRA_IMAGECMD = ""
109
110inherit siteinfo
111JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
112JFFS2_ERASEBLOCK ?= "0x40000"
113EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
114
115# Change these if you want the default mkfs behavior (i.e. the minimal number of inodes)
116EXTRA_IMAGECMD_ext2 ?= "-i 4096"
117EXTRA_IMAGECMD_ext3 ?= "-i 4096"
118EXTRA_IMAGECMD_ext4 ?= "-i 4096"
119EXTRA_IMAGECMD_btrfs ?= ""
120EXTRA_IMAGECMD_elf ?= ""
121
122IMAGE_DEPENDS = ""
123IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
124IMAGE_DEPENDS_cramfs = "util-linux-native"
125IMAGE_DEPENDS_ext2 = "e2fsprogs-native"
126IMAGE_DEPENDS_ext3 = "e2fsprogs-native"
127IMAGE_DEPENDS_ext4 = "e2fsprogs-native"
128IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
129IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
130IMAGE_DEPENDS_squashfs-xz = "squashfs-tools-native"
131IMAGE_DEPENDS_squashfs-lzo = "squashfs-tools-native"
132IMAGE_DEPENDS_elf = "virtual/kernel mkelfimage-native"
133IMAGE_DEPENDS_ubi = "mtd-utils-native"
134IMAGE_DEPENDS_ubifs = "mtd-utils-native"
135
136# This variable lists the values that are suitable for IMAGE_FSTYPES
137IMAGE_TYPES = "jffs2 jffs2.sum cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs iso hddimg squashfs squashfs-xz squashfs-lzo ubi ubifs tar tar.gz tar.bz2 tar.xz tar.lz4 cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 vmdk elf"
138
139COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum"
140COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
141COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.gz"
142COMPRESS_CMD_bz2 = "bzip2 -f -k ${IMAGE_NAME}.rootfs.${type}"
143COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.xz"
144COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.lz4"
145COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}.rootfs.${type} -o ${IMAGE_NAME}.rootfs.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
146COMPRESS_DEPENDS_lzma = "xz-native"
147COMPRESS_DEPENDS_gz = ""
148COMPRESS_DEPENDS_bz2 = ""
149COMPRESS_DEPENDS_xz = "xz-native"
150COMPRESS_DEPENDS_lz4 = "lz4-native"
151COMPRESS_DEPENDS_sum = "mtd-utils-native"
152
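# How the IMAGE_CMD_* and COMPRESS_CMD_* pieces combine (a sketch, assuming the
# default settings above): for an IMAGE_FSTYPES entry such as "ext3.gz", the
# build first runs IMAGE_CMD_ext3 to produce ${IMAGE_NAME}.rootfs.ext3 and then
# runs COMPRESS_CMD_gz on that file, yielding ${IMAGE_NAME}.rootfs.ext3.gz.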
153RUNNABLE_IMAGE_TYPES ?= "ext2 ext3"
154RUNNABLE_MACHINE_PATTERNS ?= "qemu"
155
156DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
157
158# Use IMAGE_EXTENSION_xxx to map image type 'xxx' to its real image file extension name(s) for Hob
159IMAGE_EXTENSION_live = "hddimg iso"
160
161# The IMAGE_TYPES_MASKED variable is used to mask out, from IMAGE_FSTYPES, image
162# types that will not be built at do_rootfs time: vmdk, hddimg, iso, etc.
163IMAGE_TYPES_MASKED ?= ""
diff --git a/meta/classes/image_types_uboot.bbclass b/meta/classes/image_types_uboot.bbclass
new file mode 100644
index 0000000000..07837b566c
--- /dev/null
+++ b/meta/classes/image_types_uboot.bbclass
@@ -0,0 +1,23 @@
1inherit image_types kernel-arch
2
3oe_mkimage () {
4 mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
5 -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
6}
7
8COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
9
10COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native"
11COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
12
13COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
14COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip"
15
16COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
17COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2"
18
19COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
20COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma"
21
22IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot"
23
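# A short example of the chaining above (hypothetical configuration): with
#   IMAGE_FSTYPES = "ext2.gz.u-boot"
# COMPRESS_CMD_gz first produces ${IMAGE_NAME}.rootfs.ext2.gz, and oe_mkimage
# then wraps it in a u-boot ramdisk header as ${IMAGE_NAME}.rootfs.ext2.gz.u-boot.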
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
new file mode 100644
index 0000000000..c6dea22618
--- /dev/null
+++ b/meta/classes/insane.bbclass
@@ -0,0 +1,1153 @@
1# BB Class inspired by ebuild.sh
2#
3# This class will test files after installation for certain
4# security issues and other kind of issues.
5#
6# Checks we do:
7# -Check the ownership and permissions
8# -Check the RUNTIME path for the $TMPDIR
9# -Check if .la files wrongly point to workdir
10# -Check if .pc files wrongly point to workdir
11# -Check if packages contains .debug directories or .so files
12# where they should be in -dev or -dbg
13# -Check if config.log contains traces to broken autoconf tests
14# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
15# into exec_prefix
16# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
17# files under exec_prefix
18
19
20# unsafe-references-in-binaries requires prelink-rtld from
21# prelink-native, but we don't want this DEPENDS for -native builds
22QADEPENDS = "prelink-native"
23QADEPENDS_class-native = ""
24QADEPENDS_class-nativesdk = ""
25QA_SANE = "True"
26
27# Select whether a given type of issue is a warning or an error; these may
28# have been set by other files.
29WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
30 textrel already-stripped incompatible-license files-invalid \
31 installed-vs-shipped compile-host-path install-host-path \
32 pn-overrides infodir build-deps file-rdeps \
33 "
34ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
35 perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
36 split-strip packages-list pkgv-undefined var-undefined \
37 version-going-backwards \
38 "
39
40ALL_QA = "${WARN_QA} ${ERROR_QA}"
41
42UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot"
43
44#
45# dictionary for elf headers
46#
47# feel free to add and correct.
48#
49# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
50def package_qa_get_machine_dict():
51 return {
52 "darwin9" : {
53 "arm" : (40, 0, 0, True, 32),
54 },
55 "linux" : {
56 "aarch64" : (183, 0, 0, True, 64),
57 "aarch64_be" :(183, 0, 0, False, 64),
58 "arm" : (40, 97, 0, True, 32),
59 "armeb": (40, 97, 0, False, 32),
60 "powerpc": (20, 0, 0, False, 32),
61 "powerpc64": (21, 0, 0, False, 64),
62 "i386": ( 3, 0, 0, True, 32),
63 "i486": ( 3, 0, 0, True, 32),
64 "i586": ( 3, 0, 0, True, 32),
65 "i686": ( 3, 0, 0, True, 32),
66 "x86_64": (62, 0, 0, True, 64),
67 "ia64": (50, 0, 0, True, 64),
68 "alpha": (36902, 0, 0, True, 64),
69 "hppa": (15, 3, 0, False, 32),
70 "m68k": ( 4, 0, 0, False, 32),
71 "mips": ( 8, 0, 0, False, 32),
72 "mipsel": ( 8, 0, 0, True, 32),
73 "mips64": ( 8, 0, 0, False, 64),
74 "mips64el": ( 8, 0, 0, True, 64),
75 "s390": (22, 0, 0, False, 32),
76 "sh4": (42, 0, 0, True, 32),
77 "sparc": ( 2, 0, 0, False, 32),
78 "microblaze": (189, 0, 0, False, 32),
79 "microblazeel":(189, 0, 0, True, 32),
80 },
81 "linux-uclibc" : {
82 "arm" : ( 40, 97, 0, True, 32),
83 "armeb": ( 40, 97, 0, False, 32),
84 "powerpc": ( 20, 0, 0, False, 32),
85 "i386": ( 3, 0, 0, True, 32),
86 "i486": ( 3, 0, 0, True, 32),
87 "i586": ( 3, 0, 0, True, 32),
88 "i686": ( 3, 0, 0, True, 32),
89 "x86_64": ( 62, 0, 0, True, 64),
90 "mips": ( 8, 0, 0, False, 32),
91 "mipsel": ( 8, 0, 0, True, 32),
92 "mips64": ( 8, 0, 0, False, 64),
93 "mips64el": ( 8, 0, 0, True, 64),
94 "avr32": (6317, 0, 0, False, 32),
95 "sh4": (42, 0, 0, True, 32),
96
97 },
98 "linux-musl" : {
99 "arm" : ( 40, 97, 0, True, 32),
100 "armeb": ( 40, 97, 0, False, 32),
101 "powerpc": ( 20, 0, 0, False, 32),
102 "i386": ( 3, 0, 0, True, 32),
103 "i486": ( 3, 0, 0, True, 32),
104 "i586": ( 3, 0, 0, True, 32),
105 "i686": ( 3, 0, 0, True, 32),
106 "x86_64": ( 62, 0, 0, True, 64),
107 "mips": ( 8, 0, 0, False, 32),
108 "mipsel": ( 8, 0, 0, True, 32),
109 "mips64": ( 8, 0, 0, False, 64),
110 "mips64el": ( 8, 0, 0, True, 64),
111 },
112 "uclinux-uclibc" : {
113 "bfin": ( 106, 0, 0, True, 32),
114 },
115 "linux-gnueabi" : {
116 "arm" : (40, 0, 0, True, 32),
117 "armeb" : (40, 0, 0, False, 32),
118 },
119 "linux-musleabi" : {
120 "arm" : (40, 0, 0, True, 32),
121 "armeb" : (40, 0, 0, False, 32),
122 },
123 "linux-uclibceabi" : {
124 "arm" : (40, 0, 0, True, 32),
125 "armeb" : (40, 0, 0, False, 32),
126 },
127 "linux-gnuspe" : {
128 "powerpc": (20, 0, 0, False, 32),
129 },
130 "linux-muslspe" : {
131 "powerpc": (20, 0, 0, False, 32),
132 },
133 "linux-uclibcspe" : {
134 "powerpc": (20, 0, 0, False, 32),
135 },
136 "linux-gnu" : {
137 "powerpc": (20, 0, 0, False, 32),
138 "sh4": (42, 0, 0, True, 32),
139 },
140 "linux-gnux32" : {
141 "x86_64": (62, 0, 0, True, 32),
142 },
143 "linux-gnun32" : {
144 "mips64": ( 8, 0, 0, False, 32),
145 "mips64el": ( 8, 0, 0, True, 32),
146 },
147 }
148
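# An example lookup (values taken from the table above): for TARGET_OS
# "linux-gnueabi" and TARGET_ARCH "arm",
#   package_qa_get_machine_dict()["linux-gnueabi"]["arm"]
# returns (40, 0, 0, True, 32), i.e. EM_ARM, little-endian, 32-bit.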
149
150def package_qa_clean_path(path,d):
151 """ Remove the common prefix from the path. In this case it is the TMPDIR"""
152 return path.replace(d.getVar('TMPDIR',True),"")
153
154def package_qa_write_error(type, error, d):
155 logfile = d.getVar('QA_LOGFILE', True)
156 if logfile:
157 p = d.getVar('P', True)
158        # use a context manager rather than the deprecated file() builtin
159        with open(logfile, "a+") as f:
160            f.write("%s: %s [%s]\n" % (p, error, type))
161
162def package_qa_handle_error(error_class, error_msg, d):
163 package_qa_write_error(error_class, error_msg, d)
164 if error_class in (d.getVar("ERROR_QA", True) or "").split():
165 bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
166 d.setVar("QA_SANE", False)
167 return False
168 elif error_class in (d.getVar("WARN_QA", True) or "").split():
169 bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
170 else:
171 bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
172 return True
173
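# A minimal sketch of how the lists above are consumed: an issue class in
# ERROR_QA fails the build, one in WARN_QA only warns. A distro or local.conf
# could promote a check with, for instance,
#   ERROR_QA_append = " ldflags"
# (hypothetical snippet; any class name from ALL_QA works the same way).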
174QAPATHTEST[libexec] = "package_qa_check_libexec"
175def package_qa_check_libexec(path,name, d, elf, messages):
176
177 # Skip the case where the default is explicitly /usr/libexec
178 libexec = d.getVar('libexecdir', True)
179 if libexec == "/usr/libexec":
180 return True
181
182 if 'libexec' in path.split(os.path.sep):
183        messages["libexec"] = "%s: %s is using libexec, please relocate to %s" % (name, package_qa_clean_path(path, d), libexec)
184 return False
185
186 return True
187
188QAPATHTEST[rpaths] = "package_qa_check_rpath"
189def package_qa_check_rpath(file,name, d, elf, messages):
190 """
191 Check for dangerous RPATHs
192 """
193 if not elf:
194 return
195
196 if os.path.islink(file):
197 return
198
199 bad_dirs = [d.getVar('BASE_WORKDIR', True), d.getVar('STAGING_DIR_TARGET', True)]
200
201 phdrs = elf.run_objdump("-p", d)
202
203 import re
204 rpath_re = re.compile("\s+RPATH\s+(.*)")
205 for line in phdrs.split("\n"):
206 m = rpath_re.match(line)
207 if m:
208 rpath = m.group(1)
209 for dir in bad_dirs:
210 if dir in rpath:
211 messages["rpaths"] = "package %s contains bad RPATH %s in file %s" % (name, rpath, file)
212
213QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
214def package_qa_check_useless_rpaths(file, name, d, elf, messages):
215 """
216 Check for RPATHs that are useless but not dangerous
217 """
218 def rpath_eq(a, b):
219 return os.path.normpath(a) == os.path.normpath(b)
220
221 if not elf:
222 return
223
224 if os.path.islink(file):
225 return
226
227 libdir = d.getVar("libdir", True)
228 base_libdir = d.getVar("base_libdir", True)
229
230 phdrs = elf.run_objdump("-p", d)
231
232 import re
233 rpath_re = re.compile("\s+RPATH\s+(.*)")
234 for line in phdrs.split("\n"):
235 m = rpath_re.match(line)
236 if m:
237 rpath = m.group(1)
238 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
239 # The dynamic linker searches both these places anyway. There is no point in
240 # looking there again.
241 messages["useless-rpaths"] = "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath)
242
243QAPATHTEST[dev-so] = "package_qa_check_dev"
244def package_qa_check_dev(path, name, d, elf, messages):
245 """
246 Check for ".so" library symlinks in non-dev packages
247 """
248
249 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
250 messages["dev-so"] = "non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
251 (name, package_qa_clean_path(path,d))
252
253QAPATHTEST[staticdev] = "package_qa_check_staticdev"
254def package_qa_check_staticdev(path, name, d, elf, messages):
255 """
256 Check for ".a" library in non-staticdev packages
257    There are a number of exceptions to this rule: -pic packages can contain
258    static libraries, the _nonshared.a files belong with their -dev packages, and
259    libgcc.a and libgcov.a will be skipped in their packages
260 """
261
262 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
263 messages["staticdev"] = "non -staticdev package contains static .a library: %s path '%s'" % \
264 (name, package_qa_clean_path(path,d))
265
266def package_qa_check_libdir(d):
267 """
268 Check for wrong library installation paths. For instance, catch
269 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
270 installing in /usr/lib64 when ${libdir}="/usr/lib"
271 """
272 import re
273
274 pkgdest = d.getVar('PKGDEST', True)
275 base_libdir = d.getVar("base_libdir",True) + os.sep
276 libdir = d.getVar("libdir", True) + os.sep
277 exec_prefix = d.getVar("exec_prefix", True) + os.sep
278
279 messages = []
280
281 lib_re = re.compile("^/lib.+\.so(\..+)?$")
282 exec_re = re.compile("^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
283
284 for root, dirs, files in os.walk(pkgdest):
285 if root == pkgdest:
286 # Skip subdirectories for any packages with libdir in INSANE_SKIP
287 skippackages = []
288 for package in dirs:
289 if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
290 bb.note("Package %s skipping libdir QA test" % (package))
291 skippackages.append(package)
292 for package in skippackages:
293 dirs.remove(package)
294 for file in files:
295 full_path = os.path.join(root, file)
296 rel_path = os.path.relpath(full_path, pkgdest)
297 if os.sep in rel_path:
298 package, rel_path = rel_path.split(os.sep, 1)
299 rel_path = os.sep + rel_path
300 if lib_re.match(rel_path):
301 if base_libdir not in rel_path:
302 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
303 if exec_re.match(rel_path):
304 if libdir not in rel_path:
305 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
306
307 if messages:
308 package_qa_handle_error("libdir", "\n".join(messages), d)
309
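# An example of opting out (hypothetical recipe snippet): a recipe can skip a
# specific test for one of its packages with
#   INSANE_SKIP_${PN} = "libdir"
# which the check above honours by ignoring that package's subdirectory.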
310QAPATHTEST[debug-files] = "package_qa_check_dbg"
311def package_qa_check_dbg(path, name, d, elf, messages):
312 """
313 Check for ".debug" files or directories outside of the dbg package
314 """
315
316 if not "-dbg" in name and not "-ptest" in name:
317 if '.debug' in path.split(os.path.sep):
318 messages["debug-files"] = "non debug package contains .debug directory: %s path %s" % \
319 (name, package_qa_clean_path(path,d))
320
321QAPATHTEST[perms] = "package_qa_check_perm"
322def package_qa_check_perm(path,name,d, elf, messages):
323 """
324 Check the permission of files
325 """
326 return
327
328QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
329def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
330 """
331 Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
332 """
333 if unsafe_references_skippable(path, name, d):
334 return
335
336 if elf:
337 import subprocess as sub
338 pn = d.getVar('PN', True)
339
340 exec_prefix = d.getVar('exec_prefix', True)
341 sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
342 sysroot_path_usr = sysroot_path + exec_prefix
343
344 try:
345 ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
346 except bb.process.CmdError:
347 error_msg = pn + ": prelink-rtld aborted when processing %s" % path
348 package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
349 return False
350
351 if sysroot_path_usr in ldd_output:
352 ldd_output = ldd_output.replace(sysroot_path, "")
353
354 pkgdest = d.getVar('PKGDEST', True)
355 packages = d.getVar('PACKAGES', True)
356
357 for package in packages.split():
358 short_path = path.replace('%s/%s' % (pkgdest, package), "", 1)
359 if (short_path != path):
360 break
361
362 base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix)
363 for line in ldd_output.split('\n'):
364 if exec_prefix in line:
365 error_msg = "%s: %s" % (base_err, line.strip())
366 package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
367
368 return False
369
370QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
371def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
372 """
373 Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
374 """
375 if unsafe_references_skippable(path, name, d):
376 return
377
378 if not elf:
379 import stat
380 import subprocess
381 pn = d.getVar('PN', True)
382
383 # Ensure we're checking an executable script
384 statinfo = os.stat(path)
385 if bool(statinfo.st_mode & stat.S_IXUSR):
386 # grep shell scripts for possible references to /exec_prefix/
387 exec_prefix = d.getVar('exec_prefix', True)
388 statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path)
389 if subprocess.call(statement, shell=True) == 0:
390 error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
391 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
392 error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
393 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
394
395def unsafe_references_skippable(path, name, d):
396 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
397 return True
398
399 if "-dbg" in name or "-dev" in name:
400 return True
401
402 # Other package names to skip:
403 if name.startswith("kernel-module-"):
404 return True
405
406 # Skip symlinks
407 if os.path.islink(path):
408 return True
409
410 # Skip unusual rootfs layouts which make these tests irrelevant
411 exec_prefix = d.getVar('exec_prefix', True)
412 if exec_prefix == "":
413 return True
414
415 pkgdest = d.getVar('PKGDEST', True)
416 pkgdest = pkgdest + "/" + name
417 pkgdest = os.path.abspath(pkgdest)
418 base_bindir = pkgdest + d.getVar('base_bindir', True)
419 base_sbindir = pkgdest + d.getVar('base_sbindir', True)
420 base_libdir = pkgdest + d.getVar('base_libdir', True)
421 bindir = pkgdest + d.getVar('bindir', True)
422 sbindir = pkgdest + d.getVar('sbindir', True)
423 libdir = pkgdest + d.getVar('libdir', True)
424
425 if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
426 return True
427
428 # Skip files not in base_[bindir|sbindir|libdir]
429 path = os.path.abspath(path)
430 if not (base_bindir in path or base_sbindir in path or base_libdir in path):
431 return True
432
433 return False
434
435QAPATHTEST[arch] = "package_qa_check_arch"
436def package_qa_check_arch(path,name,d, elf, messages):
437 """
438 Check if archs are compatible
439 """
440 if not elf:
441 return
442
443 target_os = d.getVar('TARGET_OS', True)
444 target_arch = d.getVar('TARGET_ARCH', True)
445 provides = d.getVar('PROVIDES', True)
446 bpn = d.getVar('BPN', True)
447
448    # FIXME: Cross packages confuse this check, so just skip them
449 for s in ['cross', 'nativesdk', 'cross-canadian']:
450 if bb.data.inherits_class(s, d):
451 return
452
453 # avoid following links to /usr/bin (e.g. on udev builds)
454 # we will check the files pointed to anyway...
455 if os.path.islink(path):
456 return
457
458    # If this throws an exception, fix the dict above
459 (machine, osabi, abiversion, littleendian, bits) \
460 = package_qa_get_machine_dict()[target_os][target_arch]
461
462    # Check the architecture and endianness of the binary
463 if not ((machine == elf.machine()) or \
464 ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
465 messages["arch"] = "Architecture did not match (%d to %d) on %s" % \
466 (machine, elf.machine(), package_qa_clean_path(path,d))
467 elif not ((bits == elf.abiSize()) or \
468 ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
469 messages["arch"] = "Bit size did not match (%d to %d) %s on %s" % \
470 (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d))
471 elif not littleendian == elf.isLittleEndian():
472        messages["arch"] = "Endianness did not match (%d to %d) on %s" % \
473 (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d))
474
475QAPATHTEST[desktop] = "package_qa_check_desktop"
476def package_qa_check_desktop(path, name, d, elf, messages):
477 """
478 Run all desktop files through desktop-file-validate.
479 """
480 if path.endswith(".desktop"):
481 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
482 output = os.popen("%s %s" % (desktop_file_validate, path))
483 # This only produces output on errors
484 for l in output:
485 messages["desktop"] = "Desktop file issue: " + l.strip()
486
487QAPATHTEST[textrel] = "package_qa_textrel"
488def package_qa_textrel(path, name, d, elf, messages):
489 """
490 Check if the binary contains relocations in .text
491 """
492
493 if not elf:
494 return
495
496 if os.path.islink(path):
497 return
498
499 phdrs = elf.run_objdump("-p", d)
500 sane = True
501
502 import re
503 textrel_re = re.compile("\s+TEXTREL\s+")
504 for line in phdrs.split("\n"):
505 if textrel_re.match(line):
506 sane = False
507
508 if not sane:
509 messages["textrel"] = "ELF binary '%s' has relocations in .text" % path
510
511QAPATHTEST[ldflags] = "package_qa_hash_style"
512def package_qa_hash_style(path, name, d, elf, messages):
513 """
514 Check if the binary has the right hash style...
515 """
516
517 if not elf:
518 return
519
520 if os.path.islink(path):
521 return
522
523 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
524 if not gnu_hash:
525 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
526 if not gnu_hash:
527 return
528
529 sane = False
530 has_syms = False
531
532 phdrs = elf.run_objdump("-p", d)
533
534 # If this binary has symbols, we expect it to have GNU_HASH too.
535 for line in phdrs.split("\n"):
536 if "SYMTAB" in line:
537 has_syms = True
538 if "GNU_HASH" in line:
539 sane = True
540 if "[mips32]" in line or "[mips64]" in line:
541 sane = True
542
543 if has_syms and not sane:
544 messages["ldflags"] = "No GNU_HASH in the elf binary: '%s'" % path
545
546
547QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
548def package_qa_check_buildpaths(path, name, d, elf, messages):
549 """
550    Check for build paths inside target files and error if they are not in the whitelist
551 """
552 # Ignore .debug files, not interesting
553 if path.find(".debug") != -1:
554 return
555
556 # Ignore symlinks
557 if os.path.islink(path):
558 return
559
560 tmpdir = d.getVar('TMPDIR', True)
561 with open(path) as f:
562 file_content = f.read()
563 if tmpdir in file_content:
564 messages["buildpaths"] = "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d)
565
566
567QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
568def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
569 """
570 Check that all packages containing Xorg drivers have ABI dependencies
571 """
572
573 # Skip dev, dbg or nativesdk packages
574 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
575 return
576
577 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
578 if driverdir in path and path.endswith(".so"):
579 mlprefix = d.getVar('MLPREFIX', True) or ''
580 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
581 if rdep.startswith("%sxorg-abi-" % mlprefix):
582 return
583 messages["xorg-driver-abi"] = "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path))
584
585QAPATHTEST[infodir] = "package_qa_check_infodir"
586def package_qa_check_infodir(path, name, d, elf, messages):
587 """
588 Check that /usr/share/info/dir isn't shipped in a particular package
589 """
590 infodir = d.expand("${infodir}/dir")
591
592 if infodir in path:
593 messages["infodir"] = "The /usr/share/info/dir file is not meant to be shipped in a particular package."
594
595QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
596def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
597 """
598 Check that the package doesn't contain any absolute symlinks to the sysroot.
599 """
600 if os.path.islink(path):
601 target = os.readlink(path)
602 if os.path.isabs(target):
603 tmpdir = d.getVar('TMPDIR', True)
604 if target.startswith(tmpdir):
605 trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
606 messages["symlink-to-sysroot"] = "Symlink %s in %s points to TMPDIR" % (trimmed, name)
607
608def package_qa_check_license(workdir, d):
609 """
610 Check for changes in the license files
611 """
612 import tempfile
613 sane = True
614
615 lic_files = d.getVar('LIC_FILES_CHKSUM', True)
616 lic = d.getVar('LICENSE', True)
617 pn = d.getVar('PN', True)
618
619 if lic == "CLOSED":
620 return True
621
622 if not lic_files:
623 bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
624 return False
625
626 srcdir = d.getVar('S', True)
627
628 for url in lic_files.split():
629 try:
630 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
631 except bb.fetch.MalformedUrl:
632 raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
633 srclicfile = os.path.join(srcdir, path)
634 if not os.path.isfile(srclicfile):
635 raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile)
636
637 recipemd5 = parm.get('md5', '')
638 beginline, endline = 0, 0
639 if 'beginline' in parm:
640 beginline = int(parm['beginline'])
641 if 'endline' in parm:
642 endline = int(parm['endline'])
643
644 if (not beginline) and (not endline):
645 md5chksum = bb.utils.md5_file(srclicfile)
646 else:
647 fi = open(srclicfile, 'rb')
648 fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
649            tmplicfile = fo.name
650 lineno = 0
651 linesout = 0
652 for line in fi:
653 lineno += 1
654 if (lineno >= beginline):
655 if ((lineno <= endline) or not endline):
656 fo.write(line)
657 linesout += 1
658 else:
659 break
660 fo.flush()
661 fo.close()
662 fi.close()
663 md5chksum = bb.utils.md5_file(tmplicfile)
664 os.unlink(tmplicfile)
665
666 if recipemd5 == md5chksum:
667            bb.note(pn + ": md5 checksum matched for ", url)
668 else:
669 if recipemd5:
670                bb.error(pn + ": md5 data does not match for ", url)
671 bb.error(pn + ": The new md5 checksum is ", md5chksum)
672 if beginline:
673 if endline:
674 srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
675 else:
676 srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
677 elif endline:
678 srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
679 else:
680 srcfiledesc = srclicfile
681 bb.error(pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic))
682 else:
683 bb.error(pn + ": md5 checksum is not specified for ", url)
684 bb.error(pn + ": The md5 checksum is ", md5chksum)
685 sane = False
686
687 return sane
688
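# An example of the metadata this function verifies (hypothetical checksum):
#   LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
# A ;beginline=N;endline=M restriction may be appended, which is why the code
# above extracts and checksums only the selected lines.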
689def package_qa_check_staged(path,d):
690 """
691 Check staged la and pc files for sanity
692    - e.g. installed being false
693
694    As this is run after every stage, we should be able
695    to find the recipe responsible for the errors easily, even
696    if we look at every .pc and .la file
697 """
698
699 sane = True
700 tmpdir = d.getVar('TMPDIR', True)
701 workdir = os.path.join(tmpdir, "work")
702
703 installed = "installed=yes"
704 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
705 pkgconfigcheck = workdir
706 else:
707 pkgconfigcheck = tmpdir
708
709 # find all .la and .pc files
710 # read the content
711 # and check for stuff that looks wrong
712 for root, dirs, files in os.walk(path):
713 for file in files:
714 path = os.path.join(root,file)
715 if file.endswith(".la"):
716 with open(path) as f:
717 file_content = f.read()
718 if workdir in file_content:
719 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
720 sane = package_qa_handle_error("la", error_msg, d)
721 elif file.endswith(".pc"):
722 with open(path) as f:
723 file_content = f.read()
724 if pkgconfigcheck in file_content:
725 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
726 sane = package_qa_handle_error("pkgconfig", error_msg, d)
727
728 return sane
729
730# Walk over all files in a directory and call func
731def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
732 import oe.qa
733
734    # If this throws an exception, fix the dict above
735 target_os = d.getVar('TARGET_OS', True)
736 target_arch = d.getVar('TARGET_ARCH', True)
737
738 warnings = {}
739 errors = {}
740 for path in pkgfiles[package]:
741 elf = oe.qa.ELFFile(path)
742 try:
743 elf.open()
744 except:
745 elf = None
746 for func in warnfuncs:
747 func(path, package, d, elf, warnings)
748 for func in errorfuncs:
749 func(path, package, d, elf, errors)
750
751 for w in warnings:
752 package_qa_handle_error(w, warnings[w], d)
753 for e in errors:
754 package_qa_handle_error(e, errors[e], d)
755
756 return len(errors) == 0
757
758def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
759    # Don't do this check for kernel/module recipes; there aren't many debug/development
760    # packages and you can get false positives, e.g. on kernel-module-lirc-dev
761 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
762 return True
763
764 sane = True
765 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
766 localdata = bb.data.createCopy(d)
767 localdata.setVar('OVERRIDES', pkg)
768 bb.data.update_data(localdata)
769
770 # Now check the RDEPENDS
771 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
772
773 # Now do the sanity check!!!
774 for rdepend in rdepends:
775 if "-dbg" in rdepend and "debug-deps" not in skip:
776 error_msg = "%s rdepends on %s" % (pkg,rdepend)
777 sane = package_qa_handle_error("debug-deps", error_msg, d)
778 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
779 error_msg = "%s rdepends on %s" % (pkg, rdepend)
780 sane = package_qa_handle_error("dev-deps", error_msg, d)
781 if rdepend not in packages:
782 rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
783 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
784 continue
785 if not rdep_data or not 'PN' in rdep_data:
786 pkgdata_dir = d.getVar("PKGDATA_DIR", True)
787 try:
788 possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
789 except OSError:
790 possibles = []
791 for p in possibles:
792 rdep_data = oe.packagedata.read_subpkgdata(p, d)
793 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
794 break
795 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
796 continue
797 error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
798 sane = package_qa_handle_error("build-deps", error_msg, d)
799
800 if "file-rdeps" not in skip:
801 ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
802 if bb.data.inherits_class('nativesdk', d):
803 ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl'])
804        # Gather the FILERDEPENDS
805 filerdepends = set()
806 rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
807 for key in rdep_data:
808 if key.startswith("FILERDEPENDS_"):
809 for subkey in rdep_data[key].split():
810 filerdepends.add(subkey)
811 filerdepends -= ignored_file_rdeps
812
813 if filerdepends:
814 next = rdepends
815 done = rdepends[:]
816 # Find all the rdepends on the dependency chain
817 while next:
818 new = []
819 for rdep in next:
820 rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
821 sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
822 if not sub_rdeps:
823 continue
824 for sub_rdep in sub_rdeps.split():
825 if sub_rdep in done:
826 continue
827 if not sub_rdep.startswith('(') and \
828 oe.packagedata.has_subpkgdata(sub_rdep, d):
829 # It's a new rdep
830 done.append(sub_rdep)
831 new.append(sub_rdep)
832 next = new
833
834            # Add the package's own rprovides
835 if pkg not in done:
836 done.insert(0, pkg)
837
838            # 'python' is not itself a package, but python-core provides it, so
839            # skip checking /usr/bin/python if python is in the rdeps, in
840            # case there is an RDEPENDS_pkg = "python" in the recipe.
841 for py in [ d.getVar('MLPREFIX', True) + "python", "python" ]:
842 if py in done:
843 filerdepends.discard("/usr/bin/python")
844 done.remove(py)
845 for rdep in done:
846                # Gather the FILERPROVIDES, RPROVIDES and FILES_INFO
847 rdep_rprovides = set()
848 rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
849 for key in rdep_data:
850 if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
851 for subkey in rdep_data[key].split():
852 rdep_rprovides.add(subkey)
853 # Add the files list to the rprovides
854 if key == "FILES_INFO":
855                    # Use eval() to turn it into a dict
856 for subkey in eval(rdep_data[key]):
857 rdep_rprovides.add(subkey)
858 filerdepends -= rdep_rprovides
859 if not filerdepends:
860 # Break if all the file rdepends are met
861 break
862 else:
863 # Clear it for the next loop
864 rdep_rprovides.clear()
865 if filerdepends:
866 error_msg = "%s requires %s, but no providers in its RDEPENDS" % \
867 (pkg, ', '.join(str(e) for e in filerdepends))
868 sane = package_qa_handle_error("file-rdeps", error_msg, d)
869
870 return sane
871
872def package_qa_check_deps(pkg, pkgdest, skip, d):
873 sane = True
874
875 localdata = bb.data.createCopy(d)
876 localdata.setVar('OVERRIDES', pkg)
877 bb.data.update_data(localdata)
878
879 def check_valid_deps(var):
880 sane = True
881 try:
882 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
883 except ValueError as e:
884 bb.fatal("%s_%s: %s" % (var, pkg, e))
885 for dep in rvar:
886 for v in rvar[dep]:
887 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
888                error_msg = "%s_%s is invalid: %s (%s); only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
889 sane = package_qa_handle_error("dep-cmp", error_msg, d)
890 return sane
891
892 sane = True
893 if not check_valid_deps('RDEPENDS'):
894 sane = False
895 if not check_valid_deps('RRECOMMENDS'):
896 sane = False
897 if not check_valid_deps('RSUGGESTS'):
898 sane = False
899 if not check_valid_deps('RPROVIDES'):
900 sane = False
901 if not check_valid_deps('RREPLACES'):
902 sane = False
903 if not check_valid_deps('RCONFLICTS'):
904 sane = False
905
906 return sane
907
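# An example of what check_valid_deps accepts (hypothetical package names):
#   RDEPENDS_${PN} = "foo (>= 1.2)"
# passes, while a constraint such as "foo (!= 1.2)" would be reported as a
# dep-cmp issue because only <, =, >, <= and >= are allowed.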
908# The PACKAGE FUNC to scan each package
909python do_package_qa () {
910 import subprocess
911 import oe.packagedata
912
913 bb.note("DO PACKAGE QA")
914
915 bb.build.exec_func("read_subpackage_metadata", d)
916
917 logdir = d.getVar('T', True)
918 pkg = d.getVar('PN', True)
919
920 # Check the compile log for host contamination
921 compilelog = os.path.join(logdir,"log.do_compile")
922
923 if os.path.exists(compilelog):
924 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
925 if subprocess.call(statement, shell=True) == 0:
926 msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
927 Please check the log '%s' for more information." % (pkg, compilelog)
928 package_qa_handle_error("compile-host-path", msg, d)
929
930 # Check the install log for host contamination
931 installlog = os.path.join(logdir,"log.do_install")
932
933 if os.path.exists(installlog):
934 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
935 if subprocess.call(statement, shell=True) == 0:
936 msg = "%s: The install log indicates that host include and/or library paths were used.\n \
937 Please check the log '%s' for more information." % (pkg, installlog)
938 package_qa_handle_error("install-host-path", msg, d)
939
940 # Scan the packages...
941 pkgdest = d.getVar('PKGDEST', True)
942 packages = d.getVar('PACKAGES', True)
943
944 cpath = oe.cachedpath.CachedPath()
945 global pkgfiles
946 pkgfiles = {}
947 for pkg in (packages or "").split():
948 pkgfiles[pkg] = []
949 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
950 for file in files:
951 pkgfiles[pkg].append(walkroot + os.sep + file)
952
953    # nothing to do if there are no packages
954 if not packages:
955 return
956
957 testmatrix = d.getVarFlags("QAPATHTEST")
958 import re
959    # The package name must match the [a-z0-9.+-]+ regular expression
960 pkgname_pattern = re.compile("^[a-z0-9.+-]+$")
961
962 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
963 taskdeps = set()
964 for dep in taskdepdata:
965 taskdeps.add(taskdepdata[dep][0])
966
967 g = globals()
968 walk_sane = True
969 rdepends_sane = True
970 deps_sane = True
971 for package in packages.split():
972 skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
973 if skip:
974 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
975 warnchecks = []
976 for w in (d.getVar("WARN_QA", True) or "").split():
977 if w in skip:
978 continue
979 if w in testmatrix and testmatrix[w] in g:
980 warnchecks.append(g[testmatrix[w]])
981 errorchecks = []
982 for e in (d.getVar("ERROR_QA", True) or "").split():
983 if e in skip:
984 continue
985 if e in testmatrix and testmatrix[e] in g:
986 errorchecks.append(g[testmatrix[e]])
987
988 bb.note("Checking Package: %s" % package)
989 # Check package name
990 if not pkgname_pattern.match(package):
991 package_qa_handle_error("pkgname",
992 "%s doesn't match the [a-z0-9.+-]+ regex\n" % package, d)
993
994 path = "%s/%s" % (pkgdest, package)
995 if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
996 walk_sane = False
997 if not package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d):
998 rdepends_sane = False
999 if not package_qa_check_deps(package, pkgdest, skip, d):
1000 deps_sane = False
1001
1002
1003 if 'libdir' in d.getVar("ALL_QA", True).split():
1004 package_qa_check_libdir(d)
1005
1006 qa_sane = d.getVar("QA_SANE", True)
1007 if not walk_sane or not rdepends_sane or not deps_sane or not qa_sane:
1008 bb.fatal("QA run found fatal errors. Please consider fixing them.")
1009 bb.note("DONE with PACKAGE QA")
1010}
1011
1012do_package_qa[rdeptask] = "do_packagedata"
1013addtask do_package_qa after do_packagedata do_package before do_build
1014
1015SSTATETASKS += "do_package_qa"
1016do_package_qa[sstate-inputdirs] = ""
1017do_package_qa[sstate-outputdirs] = ""
1018python do_package_qa_setscene () {
1019 sstate_setscene(d)
1020}
1021addtask do_package_qa_setscene
1022
1023python do_qa_staging() {
1024 bb.note("QA checking staging")
1025
1026 if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${STAGING_LIBDIR}'), d):
1027 bb.fatal("QA staging was broken by the package built above")
1028}
1029
1030python do_qa_configure() {
1031 import subprocess
1032
1033 ###########################################################################
1034 # Check config.log for cross compile issues
1035 ###########################################################################
1036
1037 configs = []
1038 workdir = d.getVar('WORKDIR', True)
1039 bb.note("Checking autotools environment for common misconfiguration")
1040 for root, dirs, files in os.walk(workdir):
1041 statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \
1042 os.path.join(root,"config.log")
1043 if "config.log" in files:
1044 if subprocess.call(statement, shell=True) == 0:
1045                bb.fatal("""This autoconf log indicates errors; it looked at host include and/or library paths while determining system capabilities.
1046Rerun the configure task after fixing this. The path was '%s'""" % root)
1047
1048 if "configure.ac" in files:
1049 configs.append(os.path.join(root,"configure.ac"))
1050 if "configure.in" in files:
1051 configs.append(os.path.join(root, "configure.in"))
1052
1053 ###########################################################################
1054 # Check gettext configuration and dependencies are correct
1055 ###########################################################################
1056
1057 cnf = d.getVar('EXTRA_OECONF', True) or ""
1058 if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
1059 ml = d.getVar("MLPREFIX", True) or ""
1060 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
1061 gt = "gettext-native"
1062 elif bb.data.inherits_class('cross-canadian', d):
1063 gt = "nativesdk-gettext"
1064 else:
1065 gt = "virtual/" + ml + "gettext"
1066 deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
1067 if gt not in deps:
1068 for config in configs:
1069 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
1070 if subprocess.call(gnu, shell=True) == 0:
1071 bb.fatal("""%s required but not in DEPENDS for file %s.
1072Missing inherit gettext?""" % (gt, config))
1073
1074 ###########################################################################
1075 # Check license variables
1076 ###########################################################################
1077
1078 if not package_qa_check_license(workdir, d):
1079 bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
1080
1081 ###########################################################################
1082    # Check unrecognised configure options (against a whitelist)
1083 ###########################################################################
1084 if bb.data.inherits_class("autotools", d):
1085 bb.note("Checking configure output for unrecognised options")
1086 try:
1087 flag = "WARNING: unrecognized options:"
1088 log = os.path.join(d.getVar('B', True), 'config.log')
1089 output = subprocess.check_output(['grep', '-F', flag, log]).replace(', ', ' ')
1090 options = set()
1091 for line in output.splitlines():
1092 options |= set(line.partition(flag)[2].split())
1093 whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST", True).split())
1094 options -= whitelist
1095 if options:
1096 pn = d.getVar('PN', True)
1097 error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
1098 package_qa_handle_error("unknown-configure-option", error_msg, d)
1099 except subprocess.CalledProcessError:
1100 pass
1101}
1102# The staging function, to check all staging
1103#addtask qa_staging after do_populate_sysroot before do_build
1104do_populate_sysroot[postfuncs] += "do_qa_staging "
1105
1106# Check for broken config.log files, for packages requiring Gettext which don't
1107# have it in DEPENDS, and for correct LIC_FILES_CHKSUM
1108#addtask qa_configure after do_configure before do_compile
1109do_configure[postfuncs] += "do_qa_configure "
1110
1111python () {
1112 tests = d.getVar('ALL_QA', True).split()
1113 if "desktop" in tests:
1114 d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native")
1115
1116 ###########################################################################
1117 # Check various variables
1118 ###########################################################################
1119
1120 # Checking ${FILESEXTRAPATHS}
1121 extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
1122 if '__default' not in extrapaths.split(":"):
1123        msg = "The FILESEXTRAPATHS variable must always use a _prepend (or _append)\n"
1124        msg += "type of assignment, and don't forget the colon.\n"
1125 msg += "Please assign it with the format of:\n"
1126 msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
1127 msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
1128 msg += "in your bbappend file\n\n"
1129 msg += "Your incorrect assignment is:\n"
1130 msg += "%s\n" % extrapaths
1131 bb.warn(msg)
1132
1133 if d.getVar('do_stage', True) is not None:
1134 bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True))
1135
1136 overrides = d.getVar('OVERRIDES', True).split(':')
1137 pn = d.getVar('PN', True)
1138 if pn in overrides:
1139 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE", True), pn)
1140 package_qa_handle_error("pn-overrides", msg, d)
1141
1142 issues = []
1143 if (d.getVar('PACKAGES', True) or "").split():
1144 for dep in (d.getVar('QADEPENDS', True) or "").split():
1145 d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
1146 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
1147 if d.getVar(var):
1148 issues.append(var)
1149 else:
1150 d.setVarFlag('do_package_qa', 'rdeptask', '')
1151 for i in issues:
1152 package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
1153}
diff --git a/meta/classes/insserv.bbclass b/meta/classes/insserv.bbclass
new file mode 100644
index 0000000000..14290a77e2
--- /dev/null
+++ b/meta/classes/insserv.bbclass
@@ -0,0 +1,5 @@
1do_rootfs[depends] += "insserv-native:do_populate_sysroot"
2run_insserv () {
3 insserv -p ${IMAGE_ROOTFS}/etc/init.d -c ${STAGING_ETCDIR_NATIVE}/insserv.conf
4}
5ROOTFS_POSTPROCESS_COMMAND += " run_insserv ; "
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
new file mode 100644
index 0000000000..bbcfa15b84
--- /dev/null
+++ b/meta/classes/kernel-arch.bbclass
@@ -0,0 +1,60 @@
1#
2# Set the ARCH environment variable for kernel compilation (including
3# modules). The returned value must match one of the architecture directories
4# in the kernel source "arch" directory.
5#
6
7valid_archs = "alpha cris ia64 \
8 i386 x86 \
9 m68knommu m68k ppc powerpc powerpc64 ppc64 \
10 sparc sparc64 \
11 arm aarch64 \
12 m32r mips \
13 sh sh64 um h8300 \
14 parisc s390 v850 \
15 avr32 blackfin \
16 microblaze"
17
18def map_kernel_arch(a, d):
19 import re
20
21 valid_archs = d.getVar('valid_archs', True).split()
22
23 if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
24 elif re.match('armeb$', a): return 'arm'
25 elif re.match('aarch64$', a): return 'arm64'
26 elif re.match('aarch64_be$', a): return 'arm64'
27 elif re.match('mips(el|64|64el)$', a): return 'mips'
28 elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
29 elif re.match('sh(3|4)$', a): return 'sh'
30 elif re.match('bfin', a): return 'blackfin'
31 elif re.match('microblazeel', a): return 'microblaze'
32 elif a in valid_archs: return a
33 else:
34 bb.error("cannot map '%s' to a linux kernel architecture" % a)
35
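# Illustrative mappings, derived from the regexes above:
#   map_kernel_arch("x86_64", d) -> "x86"
#   map_kernel_arch("mips64el", d) -> "mips"
#   map_kernel_arch("aarch64", d) -> "arm64"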
36export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
37
38def map_uboot_arch(a, d):
39 import re
40
41 if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
42 elif re.match('i.86$', a): return 'x86'
43 elif re.match('arm64$', a): return 'arm'
44 return a
45
46export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
47
48# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture-
49# specific options necessary for building the kernel and modules.
50TARGET_CC_KERNEL_ARCH ?= ""
51HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
52TARGET_LD_KERNEL_ARCH ?= ""
53HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
54TARGET_AR_KERNEL_ARCH ?= ""
55HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
56
57KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH}"
58KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
59KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
60
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
new file mode 100644
index 0000000000..a63f482a91
--- /dev/null
+++ b/meta/classes/kernel-grub.bbclass
@@ -0,0 +1,91 @@
1#
2# While installing an RPM to update the kernel on a deployed target, this class
3# updates the boot area and the boot menu with the new kernel as the priority,
4# but allows you to fall back to the original kernel as well.
5#
6# - In kernel-image's preinstall scriptlet, it backs up the original kernel to
7# avoid a probable conflict with the new one.
8#
9# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
10# set the new kernel as the boot priority.
11#
12
13pkg_preinst_kernel-image_append () {
14	# Resolve naming conflicts
15 [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
16 [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
17 if [ -n "$grubcfg" ]; then
18 # Dereference the symlink to avoid a conflict with the new kernel name.
19 if grep -q "/${KERNEL_IMAGETYPE} \+root=" $grubcfg; then
20 if [ -L "$D/boot/${KERNEL_IMAGETYPE}" ]; then
21 kimage=`realpath $D/boot/${KERNEL_IMAGETYPE} 2>/dev/null`
22 if [ -f "$D$kimage" ]; then
23 sed -i "s:${KERNEL_IMAGETYPE} \+root=:${kimage##*/} root=:" $grubcfg
24 fi
25 fi
26 fi
27
28 # Rename old kernel if it conflicts with new kernel name.
29 if grep -q "/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=" $grubcfg; then
30 if [ -f "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" ]; then
31 timestamp=`date +%s`
32 kimage="$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-$timestamp-back"
33 sed -i "s:${KERNEL_IMAGETYPE}-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
34 mv "$D/boot/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}" "$kimage"
35 fi
36 fi
37 fi
38}
39
40pkg_postinst_kernel-image_prepend () {
41 get_new_grub_cfg() {
42 grubcfg="$1"
43 old_image="$2"
44 title="Update ${KERNEL_IMAGETYPE}-${KERNEL_VERSION}-${PV}"
45 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
46 rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
47 sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"`
48
49 echo "menuentry \"$title\" {"
50 echo " set root=(hd0,1)"
51 echo "$rootfs"
52 echo "}"
53 elif [ "${grubcfg##*/}" = "menu.list" ]; then
54 rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
55 sed "s#${old_image}#${old_image%/*}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}#"`
56
57 echo "default 0"
58 echo "timeout 30"
59 echo "title $title"
60 echo "root (hd0,0)"
61 echo "$rootfs"
62 fi
63 }
64
65 get_old_grub_cfg() {
66 grubcfg="$1"
67 if [ "${grubcfg##*/}" = "grub.cfg" ]; then
68 cat "$grubcfg"
69 elif [ "${grubcfg##*/}" = "menu.list" ]; then
70 sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
71 fi
72 }
73
74 if [ -f "$D/boot/grub/grub.cfg" ]; then
75 grubcfg="$D/boot/grub/grub.cfg"
76 old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
77 elif [ -f "$D/boot/grub/menu.list" ]; then
78 grubcfg="$D/boot/grub/menu.list"
79 old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
80 fi
81
82 # Don't update grubcfg at first install while old bzImage doesn't exist.
83 if [ -f "$D/boot/${old_image##*/}" ]; then
84 grubcfgtmp="$grubcfg.tmp"
85 get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
86 get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
87 mv $grubcfgtmp $grubcfg
88 echo "Caution! Update kernel may affect kernel-module!"
89 fi
90}
91
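For a grub2 target, get_new_grub_cfg() prepends a menu entry of roughly this
shape (paths and versions illustrative, assuming KERNEL_IMAGETYPE = "bzImage"):

    menuentry "Update bzImage-3.14.26-yocto-standard-3.14.26" {
        set root=(hd0,1)
        linux /boot/bzImage-3.14.26-yocto-standard root=/dev/sda2
    }

The original entries follow it, so the previous kernel remains selectable.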
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
new file mode 100644
index 0000000000..9a95b72744
--- /dev/null
+++ b/meta/classes/kernel-module-split.bbclass
@@ -0,0 +1,200 @@
1pkg_postinst_modules () {
2if [ -z "$D" ]; then
3 depmod -a ${KERNEL_VERSION}
4else
5 # image.bbclass will call depmodwrapper after everything is installed,
6 # no need to do it here as well
7 :
8fi
9}
10
11pkg_postrm_modules () {
12if [ -z "$D" ]; then
13 depmod -a ${KERNEL_VERSION}
14else
15 depmodwrapper -a -b $D ${KERNEL_VERSION}
16fi
17}
18
19autoload_postinst_fragment() {
20if [ x"$D" = "x" ]; then
21 modprobe %s || true
22fi
23}
24
25do_install_append() {
26 install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
27}
28
29PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
30
31KERNEL_MODULES_META_PACKAGE ?= "kernel-modules"
32
33python split_kernel_module_packages () {
34 import re
35
36 modinfoexp = re.compile("([^=]+)=(.*)")
37 kerverrexp = re.compile('^(.*-hh.*)[\.\+].*$')
38 depmodpat0 = re.compile("^(.*\.k?o):..*$")
39 depmodpat1 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*$")
40 depmodpat2 = re.compile("^(.*\.k?o):\s*(.*\.k?o)\s*\\\$")
41 depmodpat3 = re.compile("^\t(.*\.k?o)\s*\\\$")
42 depmodpat4 = re.compile("^\t(.*\.k?o)\s*$")
43
44 def extract_modinfo(file):
45 import tempfile, subprocess
46 tempfile.tempdir = d.getVar("WORKDIR", True)
47 tf = tempfile.mkstemp()
48 tmpfile = tf[1]
49 cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
50 subprocess.call(cmd, shell=True)
51 f = open(tmpfile)
52 l = f.read().split("\000")
53 f.close()
54 os.close(tf[0])
55 os.unlink(tmpfile)
56 vals = {}
57 for i in l:
58 m = modinfoexp.match(i)
59 if not m:
60 continue
61 vals[m.group(1)] = m.group(2)
62 return vals
63
64 def parse_depmod():
65
66 dvar = d.getVar('PKGD', True)
67
68 kernelver = d.getVar('KERNEL_VERSION', True)
69 kernelver_stripped = kernelver
70 m = kerverrexp.match(kernelver)
71 if m:
72 kernelver_stripped = m.group(1)
73 staging_kernel_dir = d.getVar("STAGING_KERNEL_DIR", True)
74 system_map_file = "%s/boot/System.map-%s" % (dvar, kernelver)
75 if not os.path.exists(system_map_file):
76 system_map_file = "%s/System.map-%s" % (staging_kernel_dir, kernelver)
77 if not os.path.exists(system_map_file):
78 bb.fatal("System.map-%s does not exist in '%s/boot' nor STAGING_KERNEL_DIR '%s'" % (kernelver, dvar, staging_kernel_dir))
79
80 cmd = "depmod -n -a -b %s -F %s %s" % (dvar, system_map_file, kernelver_stripped)
81 f = os.popen(cmd, 'r')
82
83 deps = {}
84 line = f.readline()
85 while line:
86 if not depmodpat0.match(line):
87 line = f.readline()
88 continue
89 m1 = depmodpat1.match(line)
90 if m1:
91 deps[m1.group(1)] = m1.group(2).split()
92 else:
93 m2 = depmodpat2.match(line)
94 if m2:
95 deps[m2.group(1)] = m2.group(2).split()
96 line = f.readline()
97 m3 = depmodpat3.match(line)
98 while m3:
99 deps[m2.group(1)].extend(m3.group(1).split())
100 line = f.readline()
101 m3 = depmodpat3.match(line)
102 m4 = depmodpat4.match(line)
103 if m4: deps[m2.group(1)].extend(m4.group(1).split())
104 line = f.readline()
105 f.close()
106 return deps
107
108 def get_dependencies(file, pattern, format):
109 # the file path no longer includes PKGD
110 file = file.replace(d.getVar('PKGD', True) or '', '', 1)
111 # instead it is prefixed with /lib/modules/${KERNEL_VERSION}
112 file = file.replace("/lib/modules/%s/" % (d.getVar('KERNEL_VERSION', True) or ''), '', 1)
113
114 if file in module_deps:
115 dependencies = []
116 for i in module_deps[file]:
117 m = re.match(pattern, os.path.basename(i))
118 if not m:
119 continue
120 on = legitimize_package_name(m.group(1))
121 dependency_pkg = format % on
122 dependencies.append(dependency_pkg)
123 return dependencies
124 return []
125
126 def frob_metadata(file, pkg, pattern, format, basename):
127 vals = extract_modinfo(file)
128
129 dvar = d.getVar('PKGD', True)
130
131 # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
132 # appropriate modprobe commands to the postinst
133 autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD", True) or "").split()
134 autoload = d.getVar('module_autoload_%s' % basename, True)
135 if autoload and autoload == basename:
136 bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
137 if autoload and basename not in autoloadlist:
138 bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
139 if basename in autoloadlist:
140 name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
141 f = open(name, 'w')
142 if autoload:
143 for m in autoload.split():
144 f.write('%s\n' % m)
145 else:
146 f.write('%s\n' % basename)
147 f.close()
148 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
149 if not postinst:
150 bb.fatal("pkg_postinst_%s not defined" % pkg)
151 postinst += d.getVar('autoload_postinst_fragment', True) % (autoload or basename)
152 d.setVar('pkg_postinst_%s' % pkg, postinst)
153
154 # Write out any modconf fragment
155 modconflist = (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()
156 modconf = d.getVar('module_conf_%s' % basename, True)
157 if modconf and basename in modconflist:
158 name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
159 f = open(name, 'w')
160 f.write("%s\n" % modconf)
161 f.close()
162 elif modconf:
163 bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
164
165 files = d.getVar('FILES_%s' % pkg, True)
166 files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
167 d.setVar('FILES_%s' % pkg, files)
168
169 if "description" in vals:
170 old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
171 d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
172
173 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
174 for dep in get_dependencies(file, pattern, format):
175 if not dep in rdepends:
176 rdepends[dep] = []
177 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
178
179 module_deps = parse_depmod()
180 module_regex = '^(.*)\.k?o$'
181 module_pattern = 'kernel-module-%s'
182
183 postinst = d.getVar('pkg_postinst_modules', True)
184 postrm = d.getVar('pkg_postrm_modules', True)
185
186 modules = do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='kernel-%s' % (d.getVar("KERNEL_VERSION", True)))
187 if modules:
188 metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE', True)
189 d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
190
191 # If modules-load.d and modprobe.d are empty at this point, remove them to
192 # avoid warnings; rmdir is only called on directories that are known
193 # to be empty.
194 dvar = d.getVar('PKGD', True)
195 for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
196 if len(os.listdir(dir)) == 0:
197 os.rmdir(dir)
198}
199
200do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF", True) or "").split()))}'
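Recipe-side sketch (module names illustrative): a kernel recipe requests
autoloading and modprobe configuration for the split module packages via

    KERNEL_MODULE_AUTOLOAD += "8021q"
    KERNEL_MODULE_PROBECONF += "bonding"
    module_conf_bonding = "options bonding max_bonds=2"

which makes frob_metadata() above emit /etc/modules-load.d/8021q.conf and
/etc/modprobe.d/bonding.conf into the corresponding kernel-module-* packages.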
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
new file mode 100644
index 0000000000..f42a5c2534
--- /dev/null
+++ b/meta/classes/kernel-yocto.bbclass
@@ -0,0 +1,361 @@
1S = "${WORKDIR}/linux"
2
3# remove tasks that modify the source tree in case externalsrc is inherited
4SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch"
5
6# returns local (absolute) path names for all valid patches in the
7# src_uri
8def find_patches(d):
9 patches = src_patches(d)
10 patch_list=[]
11 for p in patches:
12 _, _, local, _, _, _ = bb.fetch.decodeurl(p)
13 patch_list.append(local)
14
15 return patch_list
16
17# returns all the elements from the src uri that are .scc files
18def find_sccs(d):
19 sources=src_patches(d, True)
20 sources_list=[]
21 for s in sources:
22 base, ext = os.path.splitext(os.path.basename(s))
23 if ext and ext in [".scc", ".cfg"]:
24 sources_list.append(s)
25 elif base and base in 'defconfig':
26 sources_list.append(s)
27
28 return sources_list
29
30# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
31# the repository as it will be found in WORKDIR
32def find_kernel_feature_dirs(d):
33 feature_dirs=[]
34 fetch = bb.fetch2.Fetch([], d)
35 for url in fetch.urls:
36 urldata = fetch.ud[url]
37 parm = urldata.parm
38 if "type" in parm:
39 type = parm["type"]
40 if "destsuffix" in parm:
41 destdir = parm["destsuffix"]
42 if type == "kmeta":
43 feature_dirs.append(destdir)
44
45 return feature_dirs
46
47# find the master/machine source branch. In the same way that the fetcher processes
48# git repositories in the SRC_URI we take the first repo found, first branch.
49def get_machine_branch(d, default):
50 fetch = bb.fetch2.Fetch([], d)
51 for url in fetch.urls:
52 urldata = fetch.ud[url]
53 parm = urldata.parm
54 if "branch" in parm:
55 branches = urldata.parm.get("branch").split(',')
56 return branches[0]
57
58 return default
59
60do_patch() {
61 cd ${S}
62 export KMETA=${KMETA}
63
64 # if kernel tools are available in-tree, they are preferred
65 # and are placed on the path before any external tools, unless
66 # the external tools flag is set, in which case we do nothing.
67 if [ -f "${S}/scripts/util/configme" ]; then
68 if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
69 PATH=${S}/scripts/util:${PATH}
70 fi
71 fi
72
73 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
74 machine_srcrev="${SRCREV_machine}"
75 if [ -z "${machine_srcrev}" ]; then
76 # fall back to SRCREV if a non-machine_meta tree is being built
77 machine_srcrev="${SRCREV}"
78 fi
79
80 # if we have a defined/set meta branch we should not be generating
81 # any meta data. The passed branch has what we need.
82 if [ -n "${KMETA}" ]; then
83 createme_flags="--disable-meta-gen --meta ${KMETA}"
84 fi
85
86 createme ${createme_flags} ${ARCH} ${machine_branch}
87 if [ $? -ne 0 ]; then
88 bbfatal "Could not create ${machine_branch}"
89 fi
90
91 sccs="${@" ".join(find_sccs(d))}"
92 patches="${@" ".join(find_patches(d))}"
93 feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
94
95 set +e
96 # add any explicitly referenced features onto the end of the feature
97 # list that is passed to the kernel build scripts.
98 if [ -n "${KERNEL_FEATURES}" ]; then
99 for feat in ${KERNEL_FEATURES}; do
100 addon_features="$addon_features --feature $feat"
101 done
102 fi
103
104 # check for feature directories/repos/branches that were part of the
105 # SRC_URI. If they were supplied, we convert them into include directives
106 # for the update part of the process
107 if [ -n "${feat_dirs}" ]; then
108 for f in ${feat_dirs}; do
109 if [ -d "${WORKDIR}/$f/meta" ]; then
110 includes="$includes -I${WORKDIR}/$f/meta"
111 elif [ -d "${WORKDIR}/$f" ]; then
112 includes="$includes -I${WORKDIR}/$f"
113 fi
114 done
115 fi
116
117 # updates or generates the target description
118 updateme ${updateme_flags} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
119 ${includes} ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
120 if [ $? -ne 0 ]; then
121 bbfatal "Could not update ${machine_branch}"
122 fi
123
124 # executes and modifies the source tree as required
125 patchme ${KMACHINE}
126 if [ $? -ne 0 ]; then
127 bberror "Could not apply patches for ${KMACHINE}."
128 bbfatal "Patch failures can be resolved in the devshell (bitbake -c devshell ${PN})"
129 fi
130
131 # check to see if the specified SRCREV is reachable from the final branch.
132 # if it isn't, something has gone wrong and we should error out.
133 if [ "${machine_srcrev}" != "AUTOINC" ]; then
134 if ! [ "$(git rev-parse --verify ${machine_srcrev})" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
135 bberror "SRCREV ${machine_srcrev} was specified, but is not reachable"
136 bbfatal "Check the BSP description for incorrect branch selection, or other errors."
137 fi
138 fi
139}
140
141do_kernel_checkout() {
142 set +e
143
144 # A linux-yocto SRC_URI should use the bareclone option. That
145 # ensures that all the branches are available in the WORKDIR version
146 # of the repository.
147 source_dir=`echo ${S} | sed 's%/$%%'`
148 source_workdir="${WORKDIR}/git"
149 if [ -d "${WORKDIR}/git/" ] && [ -d "${WORKDIR}/git/.git" ]; then
150 # case 1: the repository is a non-bare clone
151
152 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
153 if [ "${source_dir}" != "${source_workdir}" ]; then
154 rm -rf ${S}
155 mv ${WORKDIR}/git ${S}
156 fi
157 cd ${S}
158 elif [ -d "${WORKDIR}/git/" ] && [ ! -d "${WORKDIR}/git/.git" ]; then
159 # case 2: the repository is a bare clone
160
161 # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
162 if [ "${source_dir}" != "${source_workdir}" ]; then
163 rm -rf ${S}
164 mkdir -p ${S}/.git
165 mv ${WORKDIR}/git/* ${S}/.git
166 rm -rf ${WORKDIR}/git/
167 fi
168 cd ${S}
169 git config core.bare false
170 else
171 # case 3: we have no git repository at all.
172 # To support low bandwidth options for building the kernel, we'll just
173 # convert the tree to a git repo and let the rest of the process work unchanged
174
175 # if ${S} hasn't been set to the proper subdirectory a default of "linux" is
176 # used, but we can't initialize that empty directory. So check it and throw a
177 # clear error
178
179 cd ${S}
180 if [ ! -f "Makefile" ]; then
181 bberror "S is not set to the linux source directory. Check "
182 bbfatal "the recipe and set S to the proper extracted subdirectory"
183 fi
184 git init
185 git add .
186 git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
187 fi
188 # end debare
189
190 # convert any remote branches to local tracking ones
191 for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
192 b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
193 git show-ref --quiet --verify -- "refs/heads/$b"
194 if [ $? -ne 0 ]; then
195 git branch $b $i > /dev/null
196 fi
197 done
198
199 # If KMETA is defined, the branch must exist, but a machine branch
200 # can be missing since it may be created later by the tools.
201 if [ -n "${KMETA}" ]; then
202 git show-ref --quiet --verify -- "refs/heads/${KMETA}"
203 if [ $? -eq 1 ]; then
204 bberror "The branch '${KMETA}' is required and was not found"
205 bberror "Ensure that the SRC_URI points to a valid linux-yocto"
206 bbfatal "kernel repository"
207 fi
208 fi
209
210
211 # Create a working tree copy of the kernel by checking out a branch
212 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
213 git show-ref --quiet --verify -- "refs/heads/${machine_branch}"
214 if [ $? -ne 0 ]; then
215 machine_branch="master"
216 fi
217
218 # checkout and clobber any unimportant files
219 git checkout -f ${machine_branch}
220}
221do_kernel_checkout[dirs] = "${S}"
222
223addtask kernel_checkout before do_patch after do_unpack
224
225do_kernel_configme[dirs] += "${S} ${B}"
226do_kernel_configme() {
227 bbnote "kernel configme"
228 export KMETA=${KMETA}
229
230 if [ -n "${KCONFIG_MODE}" ]; then
231 configmeflags=${KCONFIG_MODE}
232 else
233 # If a defconfig was passed, use =n as the baseline, which is achieved
234 # via --allnoconfig
235 if [ -f ${WORKDIR}/defconfig ]; then
236 configmeflags="--allnoconfig"
237 fi
238 fi
239
240 cd ${S}
241 PATH=${PATH}:${S}/scripts/util
242 configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
243 if [ $? -ne 0 ]; then
244 bbfatal "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
245 fi
246
247 echo "# Global settings from linux recipe" >> ${B}/.config
248 echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
249}
250
251addtask kernel_configme after do_patch
252
253python do_kernel_configcheck() {
254 import re, string, sys
255
256 bb.plain("NOTE: validating kernel config, see log.do_kernel_configcheck for details")
257
258 # if KMETA isn't set globally by a recipe using this routine, we need to
259 # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
260 # meta-series for processing
261 kmeta = d.getVar( "KMETA", True ) or "meta"
262 if not os.path.exists(kmeta):
263 kmeta = "." + kmeta
264
265 pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
266 cmd = d.expand("cd ${S}; kconf_check -config- %s/meta-series ${S} ${B}" % kmeta)
267 ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
268
269 config_check_visibility = int(d.getVar( "KCONF_AUDIT_LEVEL", True ) or 1)
270 if config_check_visibility == 1:
271 bb.debug( 1, "%s" % result )
272 else:
273 bb.note( "%s" % result )
274}
275
276# Ensure that the branches (BSP and meta) are on the locations specified by
277# their SRCREV values. If they are NOT on the right commits, the branches
278# are corrected to the proper commit.
279do_validate_branches() {
280 set +e
281 cd ${S}
282 export KMETA=${KMETA}
283
284 machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
285 machine_srcrev="${SRCREV_machine}"
286
287 # if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to
288 # check and we can exit early
289 if [ "${machine_srcrev}" = "AUTOINC" ]; then
290 bbnote "SRCREV validation is not required for AUTOREV"
291 elif [ "${machine_srcrev}" = "" ] && [ "${SRCREV}" != "AUTOINC" ]; then
292 # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
293 # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
294 # this case, we need to reset to the given SRCREV before patching
295 bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
296 force_srcrev="${SRCREV}"
297 else
298 git cat-file -t ${machine_srcrev} > /dev/null
299 if [ $? -ne 0 ]; then
300 bberror "${machine_srcrev} is not a valid commit ID."
301 bbfatal "The kernel source tree may be out of sync"
302 fi
303 force_srcrev=${machine_srcrev}
304 fi
305
306 ## KMETA branch validation.
307 target_meta_head="${SRCREV_meta}"
308 if [ "${target_meta_head}" = "AUTOINC" ] || [ "${target_meta_head}" = "" ]; then
309 bbnote "SRCREV validation skipped for AUTOREV or empty meta branch"
310 else
311 meta_head=`git show-ref -s --heads ${KMETA}`
312
313 git cat-file -t ${target_meta_head} > /dev/null
314 if [ $? -ne 0 ]; then
315 bberror "${target_meta_head} is not a valid commit ID"
316 bbfatal "The kernel source tree may be out of sync"
317 fi
318 if [ "$meta_head" != "$target_meta_head" ]; then
319 bbnote "Setting branch ${KMETA} to ${target_meta_head}"
320 git branch -m ${KMETA} ${KMETA}-orig
321 git checkout -q -b ${KMETA} ${target_meta_head}
322 if [ $? -ne 0 ]; then
323 bbfatal "Could not checkout ${KMETA} branch from known hash ${target_meta_head}"
324 fi
325 fi
326 fi
327
328 git checkout -q -f ${machine_branch}
329 if [ -n "${force_srcrev}" ]; then
330 # see if the branch we are about to patch has been properly reset to the defined
331 # SRCREV .. if not, we reset it.
332 branch_head=`git rev-parse HEAD`
333 if [ "${force_srcrev}" != "${branch_head}" ]; then
334 current_branch=`git rev-parse --abbrev-ref HEAD`
335 git branch "$current_branch-orig"
336 git reset --hard ${force_srcrev}
337 fi
338 fi
339}
340
341# Many scripts want to look in arch/$arch/boot for the bootable
342# image. This poses a problem for vmlinux based booting. This
343# task arranges to have vmlinux appear in the normalized directory
344# location.
345do_kernel_link_vmlinux() {
346 if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
347 mkdir ${B}/arch/${ARCH}/boot
348 fi
349 cd ${B}/arch/${ARCH}/boot
350 ln -sf ../../../vmlinux
351}
352
353OE_TERMINAL_EXPORTS += "GUILT_BASE KBUILD_OUTPUT"
354GUILT_BASE = "meta"
355KBUILD_OUTPUT = "${B}"
356
357python () {
358 # If diffconfig is available, ensure it runs after kernel_configme
359 if 'do_diffconfig' in d:
360 bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
361}
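SRC_URI sketch (repository URLs and branch names illustrative): the helpers
above, find_kernel_feature_dirs() and get_machine_branch(), expect entries of
this shape, where the metadata repository is marked with type=kmeta:

    KBRANCH = "standard/base"
    KMETA = "kernel-meta"
    SRC_URI = "git://git.yoctoproject.org/linux-yocto-3.14.git;branch=${KBRANCH};name=machine \
               git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-3.14;destsuffix=${KMETA}"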
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
new file mode 100644
index 0000000000..2a6ec34c36
--- /dev/null
+++ b/meta/classes/kernel.bbclass
@@ -0,0 +1,505 @@
1inherit linux-kernel-base kernel-module-split
2
3PROVIDES += "virtual/kernel"
4DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native depmodwrapper-cross bc-native"
5
6# we include gcc above, so we don't need virtual/libc
7INHIBIT_DEFAULT_DEPS = "1"
8
9KERNEL_IMAGETYPE ?= "zImage"
10INITRAMFS_IMAGE ?= ""
11INITRAMFS_TASK ?= ""
12INITRAMFS_IMAGE_BUNDLE ?= ""
13
14python __anonymous () {
15 kerneltype = d.getVar('KERNEL_IMAGETYPE', True)
16 if kerneltype == 'uImage':
17 depends = d.getVar("DEPENDS", True)
18 depends = "%s u-boot-mkimage-native" % depends
19 d.setVar("DEPENDS", depends)
20
21 image = d.getVar('INITRAMFS_IMAGE', True)
22 if image:
23 d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_rootfs')
24
25 # NOTE: setting INITRAMFS_TASK is for backward compatibility
26 # The preferred method is to set INITRAMFS_IMAGE, because
27 # this INITRAMFS_TASK has circular dependency problems
28 # if the initramfs requires kernel modules
29 image_task = d.getVar('INITRAMFS_TASK', True)
30 if image_task:
31 d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
32}
33
34inherit kernel-arch deploy
35
36PACKAGES_DYNAMIC += "^kernel-module-.*"
37PACKAGES_DYNAMIC += "^kernel-image-.*"
38PACKAGES_DYNAMIC += "^kernel-firmware-.*"
39
40export OS = "${TARGET_OS}"
41export CROSS_COMPILE = "${TARGET_PREFIX}"
42
43KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
44 int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
45 int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}"
46
47KERNEL_RELEASE ?= "${KERNEL_VERSION}"
48
49# Where the built kernel image lies in the kernel tree
50KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
51KERNEL_IMAGEDEST = "boot"
52
53#
54# configuration
55#
56export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
57
58KERNEL_VERSION = "${@get_kernelversion('${B}')}"
59
60KERNEL_LOCALVERSION ?= ""
61
62# kernels are generally machine specific
63PACKAGE_ARCH = "${MACHINE_ARCH}"
64
65# U-Boot support
66UBOOT_ENTRYPOINT ?= "20008000"
67UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
68
69# Some Linux kernel configurations need additional parameters on the command line
70KERNEL_EXTRA_ARGS ?= ""
71
72# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
73# We don't want to override kernel Makefile variables from the environment
74EXTRA_OEMAKE = ""
75
76KERNEL_ALT_IMAGETYPE ??= ""
77
78# Define where the kernel headers are installed on the target as well as where
79# they are staged.
80KERNEL_SRC_PATH = "/usr/src/kernel"
81
82KERNEL_IMAGETYPE_FOR_MAKE = "${@(lambda s: s[:-3] if s[-3:] == ".gz" else s)(d.getVar('KERNEL_IMAGETYPE', True))}"
83
84copy_initramfs() {
85 echo "Copying initramfs into ./usr ..."
86 # In case the directory is not created yet from the first pass compile:
87 mkdir -p ${B}/usr
88 # Find and use the first initramfs image archive type we find
89 rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
90 for img in cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
91 if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
92 cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/.
93 case $img in
94 *gz)
95 echo "gzip decompressing image"
96 gunzip -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
97 break
98 ;;
99 *lz4)
100 echo "lz4 decompressing image"
101 lz4 -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
102 break
103 ;;
104 *lzo)
105 echo "lzo decompressing image"
106 lzop -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
107 break
108 ;;
109 *lzma)
110 echo "lzma decompressing image"
111 lzma -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
112 break
113 ;;
114 *xz)
115 echo "xz decompressing image"
116 xz -df ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.$img
117 break
118 ;;
119 esac
120 fi
121 done
122 echo "Finished copy of initramfs into ./usr"
123}
124
125INITRAMFS_BASE_NAME = "${KERNEL_IMAGETYPE}-initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
126INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
127do_bundle_initramfs () {
128 if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
129 echo "Creating a kernel image with a bundled initramfs..."
130 copy_initramfs
131 if [ -e ${KERNEL_OUTPUT} ] ; then
132 mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.bak
133 fi
134 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
135 kernel_do_compile
136 mv -f ${KERNEL_OUTPUT} ${KERNEL_OUTPUT}.initramfs
137 mv -f ${KERNEL_OUTPUT}.bak ${KERNEL_OUTPUT}
138 # Update install area
139 echo "There is kernel image bundled with initramfs: ${B}/${KERNEL_OUTPUT}.initramfs"
140 install -m 0644 ${B}/${KERNEL_OUTPUT}.initramfs ${D}/boot/${KERNEL_IMAGETYPE}-initramfs-${MACHINE}.bin
141 echo "${B}/${KERNEL_OUTPUT}.initramfs"
142 fi
143}
144
145python do_devshell_prepend () {
146 os.environ["LDFLAGS"] = ''
147}
148
149addtask bundle_initramfs after do_install before do_deploy
150
151kernel_do_compile() {
152 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
153 # The $use_alternate_initrd is only set from
154 # do_bundle_initramfs(). This variable is specifically for the
155 # case where we are making a second pass at the kernel
156 # compilation and we want to force the kernel build to use a
157 # different initramfs image. The way to do that in the kernel
158 # is to specify:
159 # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
160 if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
161 # The old-style way of copying a prebuilt image and building it
162 # is turned on via INITRAMFS_TASK != ""
163 copy_initramfs
164 use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio
165 fi
166 oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
167 if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then
168 gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}"
169 fi
170}
171
172do_compile_kernelmodules() {
173 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
174 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
175 oe_runmake ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
176 else
177 bbnote "no modules to compile"
178 fi
179}
180addtask compile_kernelmodules after do_compile before do_strip
181
182kernel_do_install() {
183 #
184 # First install the modules
185 #
186 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
187 if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
188 oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
189 rm "${D}/lib/modules/${KERNEL_VERSION}/build"
190 rm "${D}/lib/modules/${KERNEL_VERSION}/source"
191 # If the kernel/ directory is empty remove it to prevent QA issues
192 rmdir --ignore-fail-on-non-empty "${D}/lib/modules/${KERNEL_VERSION}/kernel"
193 else
194 bbnote "no modules to install"
195 fi
196
197 #
198 # Install various kernel output (zImage, map file, config, module support files)
199 #
200 install -d ${D}/${KERNEL_IMAGEDEST}
201 install -d ${D}/boot
202 install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}
203 install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
204 install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
205 install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
206 [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
207 install -d ${D}${sysconfdir}/modules-load.d
208 install -d ${D}${sysconfdir}/modprobe.d
209
210 #
211 # Support for external module building - create a minimal copy of the
212 # kernel source tree.
213 #
214 kerneldir=${D}${KERNEL_SRC_PATH}
215 install -d $kerneldir
216 mkdir -p ${D}/lib/modules/${KERNEL_VERSION}
217 ln -sf ${KERNEL_SRC_PATH} "${D}/lib/modules/${KERNEL_VERSION}/build"
218
219 #
220 # Store the kernel version in sysroots for module-base.bbclass
221 #
222
223 echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
224
225 #
226 # Store kernel image name to allow use during image generation
227 #
228
229 echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
230
231 #
232 # Copy the entire source tree. In case an external build directory is
233 # used, copy the build directory over first, then copy over the source
234 # dir. This ensures the original Makefiles are used and not the
235 # redirecting Makefiles in the build directory.
236 #
237 find . -depth -not -name "*.cmd" -not -name "*.o" -not -name "*.so.dbg" -not -name "*.so" -not -path "./Documentation*" -not -path "./source*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
238 cp .config $kerneldir
239 if [ "${S}" != "${B}" ]; then
240 pwd="$PWD"
241 cd "${S}"
242 find . -depth -not -path "./Documentation*" -not -path "./.*" -print0 | cpio --null -pdlu $kerneldir
243 cd "$pwd"
244 fi
245
246 # Test to ensure that the output file and image type are not actually
247 # the same file. If hardlinking is used, they will be the same, and there's
248 # no need to install.
249 ! [ ${KERNEL_OUTPUT} -ef $kerneldir/${KERNEL_IMAGETYPE} ] && install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
250 install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
251
252 # Dummy Makefile so the clean below works
253 mkdir $kerneldir/Documentation
254 touch $kerneldir/Documentation/Makefile
255
256 #
257 # Clean and remove files not needed for building modules.
258 # Some distributions go to a lot more trouble to strip out
259 # unnecessary headers; for now, we just prune the obvious bits.
260 #
261 # We don't want to leave host-arch binaries in /sysroots, so
262 # we clean the scripts dir while leaving the generated config
263 # and include files.
264 #
265 oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean _mrproper_scripts
266
267 # hide directories that shouldn't have their .c, .s and .S files deleted
268 for d in tools scripts lib; do
269 mv $kerneldir/$d $kerneldir/.$d
270 done
271
272 # delete .c, .s and .S files, unless we hid a directory as .<dir>. This technique is
273 # much faster than find -prune and -exec
274 find $kerneldir -not -path '*/\.*' -type f -name "*.[csS]" -delete
275
276 # put the hidden dirs back
277 for d in tools scripts lib; do
278 mv $kerneldir/.$d $kerneldir/$d
279 done
280
281 # As of Linux kernel version 3.0.1, the clean target removes
282 # arch/powerpc/lib/crtsavres.o which is present in
283 # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
284 if [ ${ARCH} = "powerpc" ]; then
285 cp -l arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
286 fi
287
288 # Necessary for building modules like compat-wireless.
289 if [ -f include/generated/bounds.h ]; then
290 cp -l include/generated/bounds.h $kerneldir/include/generated/bounds.h
291 fi
292 if [ -d arch/${ARCH}/include/generated ]; then
293 mkdir -p $kerneldir/arch/${ARCH}/include/generated/
294 cp -flR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
295 fi
296
297 # Remove the following binaries which cause strip or arch QA errors
298 # during do_package for cross-compiled platforms
299 bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
300 arch/powerpc/boot/mktree scripts/kconfig/zconf.tab.o \
301 scripts/kconfig/conf.o scripts/kconfig/kxgettext.o"
302 for entry in $bin_files; do
303 rm -f $kerneldir/$entry
304 done
305
306 # kernels < 2.6.30 don't have a $kerneldir/tools directory, so check that it exists before calling sed
307 if [ -f $kerneldir/tools/perf/Makefile ]; then
308 # Fix SLANG_INC for slang.h
309 sed -i 's#-I/usr/include/slang#-I=/usr/include/slang#g' $kerneldir/tools/perf/Makefile
310 fi
311}
312do_install[prefuncs] += "package_get_auto_pr"
313
314python sysroot_stage_all () {
315 oe.path.copyhardlinktree(d.expand("${D}${KERNEL_SRC_PATH}"), d.expand("${SYSROOT_DESTDIR}${KERNEL_SRC_PATH}"))
316}
317
318KERNEL_CONFIG_COMMAND ?= "oe_runmake_call oldnoconfig || yes '' | oe_runmake oldconfig"
319
320kernel_do_configure() {
321 # fixes extra + in /lib/modules/2.6.37+
322 # $ scripts/setlocalversion . => +
323 # $ make kernelversion => 2.6.37
324 # $ make kernelrelease => 2.6.37+
325 touch ${B}/.scmversion ${S}/.scmversion
326
327 # Copy defconfig to .config if .config does not exist. This allows
328 # recipes to manage the .config themselves in do_configure_prepend().
329 if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
330 cp "${WORKDIR}/defconfig" "${B}/.config"
331 fi
332 eval ${KERNEL_CONFIG_COMMAND}
333}
334
335do_savedefconfig() {
336 oe_runmake savedefconfig
337}
338do_savedefconfig[nostamp] = "1"
339addtask savedefconfig after do_configure
340
341inherit cml1
342
343EXPORT_FUNCTIONS do_compile do_install do_configure
344
345# kernel-base becomes kernel-${KERNEL_VERSION}
346# kernel-image becomes kernel-image-${KERNEL_VERSION}
347PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-modules"
348FILES_${PN} = ""
349FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin"
350FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
351FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build"
352FILES_kernel-vmlinux = "/boot/vmlinux*"
353FILES_kernel-modules = ""
354RDEPENDS_kernel = "kernel-base"
355# Allow machines to override this dependency if kernel image files are
356# not wanted in images as standard
357RDEPENDS_kernel-base ?= "kernel-image"
358PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
359PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
360RPROVIDES_kernel-base += "kernel-${KERNEL_VERSION}"
361ALLOW_EMPTY_kernel = "1"
362ALLOW_EMPTY_kernel-base = "1"
363ALLOW_EMPTY_kernel-image = "1"
364ALLOW_EMPTY_kernel-modules = "1"
365DESCRIPTION_kernel-modules = "Kernel modules meta package"
366
367pkg_postinst_kernel-base () {
368 if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
369 mkdir -p $D/lib/modules/${KERNEL_VERSION}
370 fi
371 if [ -n "$D" ]; then
372 depmodwrapper -a -b $D ${KERNEL_VERSION}
373 else
374 depmod -a ${KERNEL_VERSION}
375 fi
376}
377
378pkg_postinst_kernel-image () {
379 update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
380}
381
382pkg_postrm_kernel-image () {
383 update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true
384}
385
386PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
387
388python split_kernel_packages () {
389 do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
390}
391
392do_strip() {
393 if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
394 if [ "${KERNEL_IMAGETYPE}" != "vmlinux" ]; then
395 bbwarn "image type will not be stripped (not supported): ${KERNEL_IMAGETYPE}"
396 return
397 fi
398
399 cd ${B}
400 headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT} | \
401 grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
402 sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
403 gawk '{print $1}'`
404
405 for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
406 if [ "$headers" != *"$str"* ]; then
407 bbwarn "Section not found: $str";
408 fi
409
410 "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT}
411 }; done
412
413 bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
414 "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
415 fi;
416}
417do_strip[dirs] = "${B}"
418
419addtask do_strip before do_sizecheck after do_kernel_link_vmlinux
420
421# Support checking the kernel size since some kernels need to reside in partitions
422# with a fixed length, or there is a limit on transferring the kernel to memory
423do_sizecheck() {
424 if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
425 invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
426 if [ -n "$invalid" ]; then
427 die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integerx (The unit is Kbytes)"
428 fi
429 size=`du -ks ${B}/${KERNEL_OUTPUT} | awk '{ print $1}'`
430 if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
431 die "This kernel (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device. Please reduce the size of the kernel by making more of it modular."
432 fi
433 fi
434}
435do_sizecheck[dirs] = "${B}"
436
437addtask sizecheck before do_install after do_strip
438
439KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
440# Don't include the DATETIME variable in the sstate package signatures
441KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
442KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}"
443MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
444MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
445MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz"
446# Don't include the DATETIME variable in the sstate package signatures
447MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
448MODULE_TARBALL_DEPLOY ?= "1"
449
450do_uboot_mkimage() {
451 if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
452 if test "x${KEEPUIMAGE}" != "xyes" ; then
453 ENTRYPOINT=${UBOOT_ENTRYPOINT}
454 if test -n "${UBOOT_ENTRYSYMBOL}"; then
455 ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
456 awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
457 fi
458 if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
459 ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
460 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C none -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
461 rm -f linux.bin
462 else
463 ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
464 rm -f linux.bin.gz
465 gzip -9 linux.bin
466 uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C gzip -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz arch/${ARCH}/boot/uImage
467 rm -f linux.bin.gz
468 fi
469 fi
470 fi
471}
472
473addtask uboot_mkimage before do_install after do_compile
474
475kernel_do_deploy() {
476 install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
477 if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
478 mkdir -p ${D}/lib
479 tar -cvzf ${DEPLOYDIR}/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
480 ln -sf ${MODULE_TARBALL_BASE_NAME} ${DEPLOYDIR}/${MODULE_TARBALL_SYMLINK_NAME}
481 fi
482
483 ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGE_SYMLINK_NAME}.bin
484 ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${DEPLOYDIR}/${KERNEL_IMAGETYPE}
485
486 cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
487
488 cd ${B}
489 # Update deploy directory
490 if [ -e "${KERNEL_OUTPUT}.initramfs" ]; then
491 echo "Copying deploy kernel-initramfs image and setting up links..."
492 initramfs_base_name=${INITRAMFS_BASE_NAME}
493 initramfs_symlink_name=${KERNEL_IMAGETYPE}-initramfs-${MACHINE}
494 install -m 0644 ${KERNEL_OUTPUT}.initramfs ${DEPLOYDIR}/${initramfs_base_name}.bin
495 cd ${DEPLOYDIR}
496 ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin
497 fi
498}
499do_deploy[dirs] = "${DEPLOYDIR} ${B}"
500do_deploy[prefuncs] += "package_get_auto_pr"
501
502addtask deploy before do_build after do_install
503
504EXPORT_FUNCTIONS do_deploy
505
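Configuration sketch (values illustrative): bundling an initramfs and capping
the image size via the tasks above could look like

    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
    KERNEL_IMAGE_MAXSIZE = "8192"

do_bundle_initramfs then produces ${KERNEL_OUTPUT}.initramfs, do_sizecheck
rejects images of 8192 kB or more, and for PV = "3.14.26" the default
KERNEL_PRIORITY evaluates to 3*10000 + 14*100 + 26 = 31426.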
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
new file mode 100644
index 0000000000..8849f59042
--- /dev/null
+++ b/meta/classes/lib_package.bbclass
@@ -0,0 +1,7 @@
1#
2# ${PN}-bin is defined in bitbake.conf
3#
4# We need to allow the other packages to be greedy with what they
5# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
6#
7PACKAGE_BEFORE_PN = "${PN}-bin"
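Usage sketch: a library recipe simply adds

    inherit lib_package

and any /usr/bin and /usr/sbin contents not claimed by earlier packages then
land in ${PN}-bin rather than ${PN}.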
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
new file mode 100644
index 0000000000..bbc80167dd
--- /dev/null
+++ b/meta/classes/libc-common.bbclass
@@ -0,0 +1,43 @@
1do_install() {
2 oe_runmake install_root=${D} install
3 for r in ${rpcsvc}; do
4 h=`echo $r|sed -e's,\.x$,.h,'`
5 install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
6 done
7 install -d ${D}/${sysconfdir}/
8 install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
9 install -d ${D}${localedir}
10 make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
11 # get rid of some broken files...
12 for i in ${GLIBC_BROKEN_LOCALES}; do
13 grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
14 mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
15 done
16 rm -f ${D}${sysconfdir}/rpc
17 rm -rf ${D}${datadir}/zoneinfo
18 rm -rf ${D}${libexecdir}/getconf
19}
20
21def get_libc_fpu_setting(bb, d):
22 if d.getVar('TARGET_FPU', True) in [ 'soft', 'ppc-efd' ]:
23 return "--without-fp"
24 return ""
25
26python populate_packages_prepend () {
27 if d.getVar('DEBIAN_NAMES', True):
28 pkgs = d.getVar('PACKAGES', True).split()
29 bpn = d.getVar('BPN', True)
30 prefix = d.getVar('MLPREFIX', True) or ""
31 # Set the base package...
32 d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
33 libcprefix = prefix + bpn + '-'
34 for p in pkgs:
35 # And all the subpackages.
36 if p.startswith(libcprefix):
37 renamed = p.replace(bpn, 'libc6', 1)
38 d.setVar('PKG_' + p, renamed)
39 # For backward compatibility with old -dbg package
40 d.appendVar('RPROVIDES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
41 d.appendVar('RCONFLICTS_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
42 d.appendVar('RREPLACES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
43}
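Worked example (illustrative): with DEBIAN_NAMES set and BPN = "glibc", the
loop above renames the output packages Debian-style, e.g.

    glibc     -> libc6
    glibc-dev -> libc6-dev
    glibc-dbg -> libc6-dbg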
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
new file mode 100644
index 0000000000..c1bc399c18
--- /dev/null
+++ b/meta/classes/libc-package.bbclass
@@ -0,0 +1,390 @@
1#
2# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
3# may need packaging and it's pointless to duplicate this code.
4#
5# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
6# "compile" - Use QEMU to generate the binary locale files
7# "precompiled" - The binary locale files are pregenerated and already present
8# "ondevice" - The device will build the locale files upon first boot through the postinst
9
10GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
11
12python __anonymous () {
13 enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
14
15 pn = d.getVar("PN", True)
16 if pn.endswith("-initial"):
17 enabled = False
18
19 if enabled and int(enabled):
20 import re
21
22 target_arch = d.getVar("TARGET_ARCH", True)
23 binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
24 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
25
26 for regexp in binary_arches.split(" "):
27 r = re.compile(regexp)
28
29 if r.match(target_arch):
30 depends = d.getVar("DEPENDS", True)
31 if use_cross_localedef == "1" :
32 depends = "%s cross-localedef-native" % depends
33 else:
34 depends = "%s qemu-native" % depends
35 d.setVar("DEPENDS", depends)
36 d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
37 break
38
39 # try to fix the compile failure seen when charsets/locales/locale-code are disabled
40 if bb.utils.contains('DISTRO_FEATURES', 'libc-charsets', True, False, d) and \
41 bb.utils.contains('DISTRO_FEATURES', 'libc-locales', True, False, d) and \
42 bb.utils.contains('DISTRO_FEATURES', 'libc-locale-code', True, False, d):
43 d.setVar('PACKAGE_NO_GCONV', '0')
44 else:
45 d.setVar('PACKAGE_NO_GCONV', '1')
46}
47
48OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
49
50do_configure_prepend() {
51 if [ -e ${S}/elf/ldd.bash.in ]; then
52 sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
53 fi
54}
55
56
57
58# indentation removed on purpose
59locale_base_postinst() {
60#!/bin/sh
61
62if [ "x$D" != "x" ]; then
63 exit 1
64fi
65
66rm -rf ${TMP_LOCALE}
67mkdir -p ${TMP_LOCALE}
68if [ -f ${localedir}/locale-archive ]; then
69 cp ${localedir}/locale-archive ${TMP_LOCALE}/
70fi
71localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
72mkdir -p ${localedir}/
73mv ${TMP_LOCALE}/locale-archive ${localedir}/
74rm -rf ${TMP_LOCALE}
75}
76
77# indentation removed on purpose
78locale_base_postrm() {
79#!/bin/sh
80
81rm -rf ${TMP_LOCALE}
82mkdir -p ${TMP_LOCALE}
83if [ -f ${localedir}/locale-archive ]; then
84 cp ${localedir}/locale-archive ${TMP_LOCALE}/
85fi
86localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
87mv ${TMP_LOCALE}/locale-archive ${localedir}/
88rm -rf ${TMP_LOCALE}
89}
90
91
92TMP_LOCALE="/tmp/locale${localedir}"
93LOCALETREESRC ?= "${PKGD}"
94
95do_prep_locale_tree() {
96 treedir=${WORKDIR}/locale-tree
97 rm -rf $treedir
98 mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
99 tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
100 # uncompress the charmaps to avoid parsing errors
101 for i in $treedir/${datadir}/i18n/charmaps/*gz; do
102 gunzip $i
103 done
104 tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
105 if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
106 tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
107 fi
108 install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
109}
110
111do_collect_bins_from_locale_tree() {
112 treedir=${WORKDIR}/locale-tree
113
114 parent=$(dirname ${localedir})
115 mkdir -p ${PKGD}/$parent
116 tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
117}
118
119inherit qemu
120
121python package_do_split_gconvs () {
122 import re
123 if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
124 bb.note("package requested not splitting gconvs")
125 return
126
127 if not d.getVar('PACKAGES', True):
128 return
129
130 mlprefix = d.getVar("MLPREFIX", True) or ""
131
132 bpn = d.getVar('BPN', True)
133 libdir = d.getVar('libdir', True)
134 if not libdir:
135 bb.error("libdir not defined")
136 return
137 datadir = d.getVar('datadir', True)
138 if not datadir:
139 bb.error("datadir not defined")
140 return
141
142 gconv_libdir = base_path_join(libdir, "gconv")
143 charmap_dir = base_path_join(datadir, "i18n", "charmaps")
144 locales_dir = base_path_join(datadir, "i18n", "locales")
145 binary_locales_dir = d.getVar('localedir', True)
146
147 def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
148 deps = []
149 f = open(fn, "rb")
150 c_re = re.compile('^copy "(.*)"')
151 i_re = re.compile('^include "(\w+)".*')
152 for l in f.readlines():
153 m = c_re.match(l) or i_re.match(l)
154 if m:
155 dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
156 if not dp in deps:
157 deps.append(dp)
158 f.close()
159 if deps != []:
160 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
161 if bpn != 'glibc':
162 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
163
164 do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
165 description='gconv module for character set %s', hook=calc_gconv_deps, \
166 extra_depends=bpn+'-gconv')
167
168 def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
169 deps = []
170 f = open(fn, "rb")
171 c_re = re.compile('^copy "(.*)"')
172 i_re = re.compile('^include "(\w+)".*')
173 for l in f.readlines():
174 m = c_re.match(l) or i_re.match(l)
175 if m:
176 dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
177 if not dp in deps:
178 deps.append(dp)
179 f.close()
180 if deps != []:
181 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
182 if bpn != 'glibc':
183 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
184
185 do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
186 description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
187
188 def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
189 deps = []
190 f = open(fn, "rb")
191 c_re = re.compile('^copy "(.*)"')
192 i_re = re.compile('^include "(\w+)".*')
193 for l in f.readlines():
194 m = c_re.match(l) or i_re.match(l)
195 if m:
196 dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
197 if not dp in deps:
198 deps.append(dp)
199 f.close()
200 if deps != []:
201 d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
202 if bpn != 'glibc':
203 d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
204
205 do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
206 description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
207 d.setVar('PACKAGES', d.getVar('PACKAGES') + ' ' + d.getVar('MLPREFIX') + bpn + '-gconv')
208
209 use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
210
211 dot_re = re.compile("(.*)\.(.*)")
212
213 # Read in supported locales and associated encodings
214 supported = {}
215 with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
216 for line in f.readlines():
217 try:
218 locale, charset = line.rstrip().split()
219 except ValueError:
220 continue
221 supported[locale] = charset
222
223 # The GLIBC_GENERATE_LOCALES variable specifies which locales are to be generated; empty or "all" means all locales
224 to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
225 if not to_generate or to_generate == 'all':
226 to_generate = supported.keys()
227 else:
228 to_generate = to_generate.split()
229 for locale in to_generate:
230 if locale not in supported:
231 if '.' in locale:
232 charset = locale.split('.')[1]
233 else:
234 charset = 'UTF-8'
235 bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
236 supported[locale] = charset
237
238 def output_locale_source(name, pkgname, locale, encoding):
239 d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
240 (mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
241 d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
242 % (locale, encoding, locale))
243 d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
244 (locale, encoding, locale))
245
246 def output_locale_binary_rdepends(name, pkgname, locale, encoding):
247 m = re.match("(.*)\.(.*)", name)
248 if m:
249 libc_name = "%s.%s" % (m.group(1), m.group(2).lower())
250 else:
251 libc_name = name
252 d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
253 % (mlprefix+bpn, libc_name)))
254
255 commands = {}
256
257 def output_locale_binary(name, pkgname, locale, encoding):
258 treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
259 ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
260 path = d.getVar("PATH", True)
261 i18npath = base_path_join(treedir, datadir, "i18n")
262 gconvpath = base_path_join(treedir, "iconvdata")
263 outputpath = base_path_join(treedir, binary_locales_dir)
264
265 use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
266 if use_cross_localedef == "1":
267 target_arch = d.getVar('TARGET_ARCH', True)
268 locale_arch_options = { \
269 "arm": " --uint32-align=4 --little-endian ", \
270 "armeb": " --uint32-align=4 --big-endian ", \
271 "aarch64_be": " --uint32-align=4 --big-endian ", \
272 "sh4": " --uint32-align=4 --big-endian ", \
273 "powerpc": " --uint32-align=4 --big-endian ", \
274 "powerpc64": " --uint32-align=4 --big-endian ", \
275 "mips": " --uint32-align=4 --big-endian ", \
276 "mips64": " --uint32-align=4 --big-endian ", \
277 "mipsel": " --uint32-align=4 --little-endian ", \
278 "mips64el":" --uint32-align=4 --little-endian ", \
279 "i586": " --uint32-align=4 --little-endian ", \
280 "i686": " --uint32-align=4 --little-endian ", \
281 "x86_64": " --uint32-align=4 --little-endian " }
282
283 if target_arch in locale_arch_options:
284 localedef_opts = locale_arch_options[target_arch]
285 else:
286 bb.error("locale_arch_options not found for target_arch=" + target_arch)
287 raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
288
289 localedef_opts += " --force --old-style --no-archive --prefix=%s \
290 --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
291 % (treedir, treedir, datadir, locale, encoding, outputpath, name)
292
293 cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
294 (path, i18npath, gconvpath, localedef_opts)
295 else: # earlier slower qemu way
296 qemu = qemu_target_binary(d)
297 localedef_opts = "--force --old-style --no-archive --prefix=%s \
298 --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
299 % (treedir, datadir, locale, encoding, name)
300
301 qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
302 if not qemu_options:
303 qemu_options = d.getVar('QEMU_OPTIONS', True)
304
305 cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
306 -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
307 (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
308
309 commands["%s/%s" % (outputpath, name)] = cmd
310
311 bb.note("generating locale %s (%s)" % (locale, encoding))
312
313 def output_locale(name, locale, encoding):
314 pkgname = d.getVar('MLPREFIX') + 'locale-base-' + legitimize_package_name(name)
315 d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
316 d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
317 rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
318 m = re.match("(.*)_(.*)", name)
319 if m:
320 rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
321 d.setVar('RPROVIDES_%s' % pkgname, rprovides)
322
323 if use_bin == "compile":
324 output_locale_binary_rdepends(name, pkgname, locale, encoding)
325 output_locale_binary(name, pkgname, locale, encoding)
326 elif use_bin == "precompiled":
327 output_locale_binary_rdepends(name, pkgname, locale, encoding)
328 else:
329 output_locale_source(name, pkgname, locale, encoding)
330
331 if use_bin == "compile":
332 bb.note("preparing tree for binary locale generation")
333 bb.build.exec_func("do_prep_locale_tree", d)
334
335 utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
336 encodings = {}
337 for locale in to_generate:
338 charset = supported[locale]
339 if utf8_only and charset != 'UTF-8':
340 continue
341
342 m = dot_re.match(locale)
343 if m:
344 base = m.group(1)
345 else:
346 base = locale
347
348 # Precompiled locales are kept as-is, obeying SUPPORTED, while
349 # the others are adjusted, ensuring that the non-suffixed locales
350 # are UTF-8, while the suffixed ones are not.
351 if use_bin == "precompiled":
352 output_locale(locale, base, charset)
353 else:
354 if charset == 'UTF-8':
355 output_locale(base, base, charset)
356 else:
357 output_locale('%s.%s' % (base, charset), base, charset)
358
359 if use_bin == "compile":
360 makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
361 m = open(makefile, "w")
362 m.write("all: %s\n\n" % " ".join(commands.keys()))
363 for cmd in commands:
364 m.write(cmd + ":\n")
365 m.write("\t" + commands[cmd] + "\n\n")
366 m.close()
367 d.setVar("B", os.path.dirname(makefile))
368 d.setVar("EXTRA_OEMAKE", "${PARALLEL_MAKE}")
369 bb.note("Executing binary locale generation makefile")
370 bb.build.exec_func("oe_runmake", d)
371 bb.note("collecting binary locales from locale tree")
372 bb.build.exec_func("do_collect_bins_from_locale_tree", d)
373 do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
374 output_pattern=bpn+'-binary-localedata-%s', \
375 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
376 elif use_bin == "precompiled":
377 do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
378 output_pattern=bpn+'-binary-localedata-%s', \
379 description='binary locale definition for %s', extra_depends='', allow_dirs=True)
380 else:
381 bb.note("generation of binary locales is disabled; this may break i18n!")
382
383}
384
385# We want to do this indirection so that we can safely 'return'
386# from the called function even though we're prepending
387python populate_packages_prepend () {
388 bb.build.exec_func('package_do_split_gconvs', d)
389}
390
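
A minimal standalone sketch of the Makefile trick used by the "compile" path above (hypothetical locale names, with plain echo commands standing in for the cross-localedef invocations collected in commands{}):

import subprocess

# Hypothetical output-path -> command map, as built in commands{} above.
commands = {
    "locale-tree/out/en_GB": "echo generating en_GB",
    "locale-tree/out/de_DE": "echo generating de_DE",
}

with open("Makefile", "w") as m:
    # One target per locale; 'make -jN' then runs them in parallel.
    m.write("all: %s\n\n" % " ".join(commands.keys()))
    for target, cmd in commands.items():
        m.write(target + ":\n")
        m.write("\t" + cmd + "\n\n")

subprocess.check_call(["make", "-j", "4"])
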
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
new file mode 100644
index 0000000000..69e8f12cba
--- /dev/null
+++ b/meta/classes/license.bbclass
@@ -0,0 +1,397 @@
1# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
2# LIC_FILES_CHKSUM.
3# TODO:
4# - There is a real issue revolving around license naming standards.
5
6LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
7LICSSTATEDIR = "${WORKDIR}/license-destdir/"
8
9# Create extra package with license texts and add it to RRECOMMENDS_${PN}
10LICENSE_CREATE_PACKAGE[type] = "boolean"
11LICENSE_CREATE_PACKAGE ??= "0"
12LICENSE_PACKAGE_SUFFIX ??= "-lic"
13LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
14
15addtask populate_lic after do_patch before do_build
16do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
17do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
18
19python write_package_manifest() {
20 # Get list of installed packages
21 license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
22 bb.utils.mkdirhier(license_image_dir)
23 from oe.rootfs import image_list_installed_packages
24 open(os.path.join(license_image_dir, 'package.manifest'),
25 'w+').write(image_list_installed_packages(d))
26}
27
28license_create_manifest() {
29 # Test if BUILD_IMAGES_FROM_FEEDS is defined in env
30 if [ -n "${BUILD_IMAGES_FROM_FEEDS}" ]; then
31 exit 0
32 fi
33
34 INSTALLED_PKGS=`cat ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest`
35 LICENSE_MANIFEST="${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest"
36 # remove existing license.manifest file
37 if [ -f ${LICENSE_MANIFEST} ]; then
38 rm ${LICENSE_MANIFEST}
39 fi
40 touch ${LICENSE_MANIFEST}
41 for pkg in ${INSTALLED_PKGS}; do
42 filename=`ls ${PKGDATA_DIR}/runtime-reverse/${pkg}| head -1`
43 pkged_pn="$(sed -n 's/^PN: //p' ${filename})"
44
45 # check to see if the package name exists in the manifest. if so, bail.
46 if grep -q "^PACKAGE NAME: ${pkg}" ${LICENSE_MANIFEST}; then
47 continue
48 fi
49
50 pkged_pv="$(sed -n 's/^PV: //p' ${filename})"
51 pkged_name="$(basename $(readlink ${filename}))"
52 pkged_lic="$(sed -n "/^LICENSE_${pkged_name}: /{ s/^LICENSE_${pkged_name}: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
53 if [ -z ${pkged_lic} ]; then
54 # fallback checking value of LICENSE
55 pkged_lic="$(sed -n "/^LICENSE: /{ s/^LICENSE: //; s/[|&()*]/ /g; s/ */ /g; p }" ${filename})"
56 fi
57
58 echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_MANIFEST}
59 echo "PACKAGE VERSION:" ${pkged_pv} >> ${LICENSE_MANIFEST}
60 echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_MANIFEST}
61 printf "LICENSE:" >> ${LICENSE_MANIFEST}
62 for lic in ${pkged_lic}; do
63 # to reference a license file, trim the trailing '+' symbol
64 if ! [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic%+}" ]; then
65 bbwarn "The license listed ${lic} was not in the licenses collected for ${pkged_pn}"
66 fi
67 printf " ${lic}" >> ${LICENSE_MANIFEST}
68 done
69 printf "\n\n" >> ${LICENSE_MANIFEST}
70 done
71
72 # Two options here:
73 # - Just copy the manifest
74 # - Copy the manifest and the license directories
75 # With both options set we see a 0.5 MB increase in core-image-minimal
76 if [ "${COPY_LIC_MANIFEST}" = "1" ]; then
77 mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/
78 cp ${LICENSE_MANIFEST} ${IMAGE_ROOTFS}/usr/share/common-licenses/license.manifest
79 if [ "${COPY_LIC_DIRS}" = "1" ]; then
80 for pkg in ${INSTALLED_PKGS}; do
81 mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
82 pkged_pn="$(oe-pkgdata-util lookup-recipe ${PKGDATA_DIR} ${pkg})"
83 for lic in `ls ${LICENSE_DIRECTORY}/${pkged_pn}`; do
84 # We really don't need to copy the generics, as they're
85 # represented in the manifest and in the actual package licenses.
86 # Doing so would make the image quite a bit larger.
87 if [ "${lic#generic_}" = "${lic}" ]; then
88 cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
89 else
90 if [ ! -f ${IMAGE_ROOTFS}/usr/share/common-licenses/${lic} ]; then
91 cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/
92 fi
93 ln -sf ../${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
94 fi
95 done
96 done
97 fi
98 fi
99
100}
101
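
For reference, each stanza that license_create_manifest() appends to license.manifest has this shape (the package, version, and license values here are illustrative, not taken from a real build):

PACKAGE NAME: busybox
PACKAGE VERSION: 1.22.1
RECIPE NAME: busybox
LICENSE: GPLv2 bzip2
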
102python do_populate_lic() {
103 """
104 Populate LICENSE_DIRECTORY with licenses.
105 """
106 lic_files_paths = find_license_files(d)
107
108 # The base directory we wrangle licenses to
109 destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
110 copy_license_files(lic_files_paths, destdir)
111}
112
113# It would be better to copy them in do_install_append, but find_license_files is python
114python perform_packagecopy_prepend () {
115 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
116 if d.getVar('CLASSOVERRIDE', True) == 'class-target' and enabled:
117 lic_files_paths = find_license_files(d)
118
119 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
120 destdir = d.getVar('D', True) + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY', True), d.getVar('PN', True))
121 copy_license_files(lic_files_paths, destdir)
122 add_package_and_files(d)
123}
124
125def add_package_and_files(d):
126 packages = d.getVar('PACKAGES', True)
127 files = d.getVar('LICENSE_FILES_DIRECTORY', True)
128 pn = d.getVar('PN', True)
129 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', True))
130 if pn_lic in packages:
131 bb.warn("%s package already exists in %s." % (pn_lic, pn))
132 else:
133 # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
134 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
135 d.setVar('FILES_' + pn_lic, files)
136 rrecommends_pn = d.getVar('RRECOMMENDS_' + pn, True)
137 if rrecommends_pn:
138 d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
139 else:
140 d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
141
142def copy_license_files(lic_files_paths, destdir):
143 import shutil
144 import stat
145
146 bb.utils.mkdirhier(destdir)
147 for (basename, path) in lic_files_paths:
148 try:
149 src = path
150 dst = os.path.join(destdir, basename)
151 if os.path.exists(dst):
152 os.remove(dst)
153 if (os.stat(src).st_dev == os.stat(destdir).st_dev):
154 os.link(src, dst)
155 else:
156 shutil.copyfile(src, dst)
157 os.chmod(dst, os.stat(dst).st_mode | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
158 except Exception as e:
159 bb.warn("Could not copy license file %s: %s" % (basename, e))
160
161def find_license_files(d):
162 """
163 Creates a list of files used in LIC_FILES_CHKSUM plus the generic LICENSE files.
164 """
165 import shutil
166 import oe.license
167
168 pn = d.getVar('PN', True)
169 license_types = None
170 for package in d.getVar('PACKAGES', True).split():
171 if d.getVar('LICENSE_' + package, True):
172 pkg_lic = d.getVar('LICENSE_' + package, True)
173 license_types = (license_types + ' & ' + pkg_lic) if license_types else pkg_lic
174
175 #If we get here with no per-package license types, then we have a
176 #recipe-level license. If so, we grab only that.
177 if not license_types:
178 # All the license types at the recipe level
179 license_types = d.getVar('LICENSE', True)
181
182 # All the license files for the package
183 lic_files = d.getVar('LIC_FILES_CHKSUM', True)
184 pn = d.getVar('PN', True)
185 # The license files are located in S, at the paths given in LIC_FILES_CHKSUM.
186 srcdir = d.getVar('S', True)
187 # Directory we store the generic licenses as set in the distro configuration
188 generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
189 # List of basename, path tuples
190 lic_files_paths = []
191 license_source_dirs = []
192 license_source_dirs.append(generic_directory)
193 additional_lic_dirs = (d.getVar('LICENSE_PATH', True) or '').split()
194 for lic_dir in additional_lic_dirs:
195 license_source_dirs.append(lic_dir)
199
200 class FindVisitor(oe.license.LicenseVisitor):
201 def visit_Str(self, node):
202 #
203 # Until we figure out what to do with
204 # the two modifiers we support ("or later" = '+'
205 # and "with exceptions" = '*'),
206 # we'll just strip out the modifier and use
207 # the base license.
208 find_license(node.s.replace("+", "").replace("*", ""))
209 self.generic_visit(node)
210
211 def find_license(license_type):
212 try:
213 bb.utils.mkdirhier(gen_lic_dest)
214 except:
215 pass
216 spdx_generic = None
217 license_source = None
218 # If the generic does not exist we need to check to see if there is an SPDX mapping to it
219 for lic_dir in license_source_dirs:
220 if not os.path.isfile(os.path.join(lic_dir, license_type)):
221 if d.getVarFlag('SPDXLICENSEMAP', license_type) is not None:
222 # Great, there is an SPDXLICENSEMAP. We can copy!
223 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
224 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
225 license_source = lic_dir
226 break
227 elif os.path.isfile(os.path.join(lic_dir, license_type)):
228 spdx_generic = license_type
229 license_source = lic_dir
230 break
231
232 if spdx_generic and license_source:
233 # we really should copy to generic_ + spdx_generic; however, that ends up messing up
234 # the manifest audit. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
235
236 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic)))
237 else:
238 # And here is where we warn people that their licenses are lousy
239 bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
241
242 if not generic_directory:
243 raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
244
245 if not lic_files:
246 # No recipe should have an invalid license file. This is checked elsewhere,
247 # but let's be pedantic.
248 bb.note(pn + ": Recipe file does not have license file information.")
249 return lic_files_paths
250
251 for url in lic_files.split():
252 try:
253 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
254 except bb.fetch.MalformedUrl:
255 raise bb.build.FuncFailed("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF', True), url))
256 # We want the license filename and path
257 srclicfile = os.path.join(srcdir, path)
258 lic_files_paths.append((os.path.basename(path), srclicfile))
259
260 v = FindVisitor()
261 try:
262 v.visit_string(license_types)
263 except oe.license.InvalidLicense as exc:
264 bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
265 except SyntaxError:
266 bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF', True)))
267
268 return lic_files_paths
269
270def return_spdx(d, license):
271 """
272 This function returns the spdx mapping of a license if it exists.
273 """
274 return d.getVarFlag('SPDXLICENSEMAP', license, True)
275
276def canonical_license(d, license):
277 """
278 Return the canonical (SPDX) form of the license if available (so GPLv3
279 becomes GPL-3.0); for a license named 'X+', return the canonical form of
280 'X', if available, with the trailing '+' (so GPLv3+ becomes GPL-3.0+);
281 or the passed license if there is no canonical form.
282 """
283 lic = d.getVarFlag('SPDXLICENSEMAP', license, True) or ""
284 if not lic and license.endswith('+'):
285 lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'), True)
286 if lic:
287 lic += '+'
288 return lic or license
289
290def incompatible_license(d, dont_want_licenses, package=None):
291 """
292 This function checks if a recipe has only incompatible licenses. It also
293 takes the 'or' operator into consideration. dont_want_licenses should be
294 passed as canonical (SPDX) names.
295 """
296 import re
297 import oe.license
298 from fnmatch import fnmatchcase as fnmatch
299 license = d.getVar("LICENSE_%s" % package, True) if package else None
300 if not license:
301 license = d.getVar('LICENSE', True)
302
303 def license_ok(license):
304 for dwl in dont_want_licenses:
305 # If we want to exclude a license named generically 'X', we
306 # surely want to exclude 'X+' as well. Consequently, we
307 # strip a trailing '+' character from LICENSE when the
308 # INCOMPATIBLE_LICENSE entry is not an 'X+' license.
309 lic = license
310 if not re.search('\+$', dwl):
311 lic = re.sub('\+', '', license)
312 if fnmatch(lic, dwl):
313 return False
314 return True
315
316 # Handle an "or" of two license sets provided by
317 # flattened_licenses(), picking one that works if possible.
318 def choose_lic_set(a, b):
319 return a if all(license_ok(lic) for lic in a) else b
320
321 try:
322 licenses = oe.license.flattened_licenses(license, choose_lic_set)
323 except oe.license.LicenseError as exc:
324 bb.fatal('%s: %s' % (d.getVar('P', True), exc))
325 return any(not license_ok(canonical_license(d, l)) for l in licenses)
326
327def check_license_flags(d):
328 """
329 This function checks if a recipe has any LICENSE_FLAGS that
330 aren't whitelisted.
331
332 If it does, it returns the first LICENSE_FLAGS item missing from the
333 whitelist, or all of the LICENSE_FLAGS if there is no whitelist.
334
335 If everything is properly whitelisted, it returns None.
336 """
337
338 def license_flag_matches(flag, whitelist, pn):
339 """
340 Return True if flag matches something in whitelist, None if not.
341
342 Before we test a flag against the whitelist, we append _${PN}
343 to it. We then try to match that string against the
344 whitelist. This covers the normal case, where we expect
345 LICENSE_FLAGS to be a simple string like 'commercial', which
346 the user typically matches exactly in the whitelist by
347 explicitly appending the package name, e.g. 'commercial_foo'.
348 If that match fails, however, we then split the flag on
349 '_', appending and testing each fragment until we either match or
350 run out of fragments.
351 """
352 flag_pn = ("%s_%s" % (flag, pn))
353 for candidate in whitelist:
354 if flag_pn == candidate:
355 return True
356
357 flag_cur = ""
358 flagments = flag_pn.split("_")
359 flagments.pop() # we've already tested the full string
360 for flagment in flagments:
361 if flag_cur:
362 flag_cur += "_"
363 flag_cur += flagment
364 for candidate in whitelist:
365 if flag_cur == candidate:
366 return True
367 return False
368
369 def all_license_flags_match(license_flags, whitelist):
370 """ Return first unmatched flag, None if all flags match """
371 pn = d.getVar('PN', True)
372 split_whitelist = whitelist.split()
373 for flag in license_flags.split():
374 if not license_flag_matches(flag, split_whitelist, pn):
375 return flag
376 return None
377
378 license_flags = d.getVar('LICENSE_FLAGS', True)
379 if license_flags:
380 whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
381 if not whitelist:
382 return license_flags
383 unmatched_flag = all_license_flags_match(license_flags, whitelist)
384 if unmatched_flag:
385 return unmatched_flag
386 return None
387
388SSTATETASKS += "do_populate_lic"
389do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
390do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
391
392ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
393
394python do_populate_lic_setscene () {
395 sstate_setscene(d)
396}
397addtask do_populate_lic_setscene
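
As a worked example of canonical_license() above, here is a minimal standalone sketch with an ordinary dict standing in for the SPDXLICENSEMAP variable flags (the mappings shown are illustrative):

# Illustrative stand-in for the SPDXLICENSEMAP variable flags.
SPDXLICENSEMAP = {"GPLv2": "GPL-2.0", "GPLv3": "GPL-3.0", "LGPLv2.1": "LGPL-2.1"}

def canonical_license(license):
    # Try a direct mapping first, then 'X+' as canonical('X') plus a trailing '+'.
    lic = SPDXLICENSEMAP.get(license, "")
    if not lic and license.endswith('+'):
        lic = SPDXLICENSEMAP.get(license.rstrip('+'), "")
        if lic:
            lic += '+'
    return lic or license

assert canonical_license("GPLv3") == "GPL-3.0"
assert canonical_license("GPLv3+") == "GPL-3.0+"
assert canonical_license("MIT") == "MIT"     # no canonical form, passed through
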
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
new file mode 100644
index 0000000000..4f2b0a4a98
--- /dev/null
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -0,0 +1,32 @@
1# parse kernel ABI version out of <linux/version.h>
2def get_kernelversion(p):
3 import re
4
5 fn = p + '/include/linux/utsrelease.h'
6 if not os.path.isfile(fn):
7 # after 2.6.33-rc1
8 fn = p + '/include/generated/utsrelease.h'
9 if not os.path.isfile(fn):
10 fn = p + '/include/linux/version.h'
11
13 try:
14 f = open(fn, 'r')
15 except IOError:
16 return None
17
18 l = f.readlines()
19 f.close()
20 r = re.compile("#define UTS_RELEASE \"(.*)\"")
21 for s in l:
22 m = r.match(s)
23 if m:
24 return m.group(1)
25 return None
26
27def linux_module_packages(s, d):
28 suffix = ""
29 return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
30
31# that's all
32
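
As a usage sketch of linux_module_packages() (restated verbatim so it runs standalone; the module names are examples, and d is unused by the current implementation):

def linux_module_packages(s, d):
    suffix = ""
    return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))

print(linux_module_packages("nf_nat EXT4", None))
# -> kernel-module-nf-nat kernel-module-ext4
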
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
new file mode 100644
index 0000000000..78d65bda3a
--- /dev/null
+++ b/meta/classes/logging.bbclass
@@ -0,0 +1,72 @@
1# The following logging mechanisms are to be used in bash functions of recipes.
2# They are intended to map one to one in intention and output format with the
3# python recipe logging functions of a similar naming convention: bb.plain(),
4# bb.note(), etc.
5#
6# For the time being, all of these print only to the task logs. Future
7# enhancements may integrate these calls with the bitbake logging
8# infrastructure, allowing for printing to the console as appropriate. The
9# interface and intention statements reflect that future goal. Once it is
10# in place, no changes will be necessary to recipes using these logging
11# mechanisms.
12
13# Print the output exactly as it is passed in. Typically used for output of
14# tasks that should be seen on the console. Use sparingly.
15# Output: logs console
16# NOTE: console output is not currently implemented.
17bbplain() {
18 echo "$*"
19}
20
21# Notify the user of a noteworthy condition.
22# Output: logs console
23# NOTE: console output is not currently implemented.
24bbnote() {
25 echo "NOTE: $*"
26}
27
28# Print a warning to the log. Warnings are non-fatal, and do not
29# indicate a build failure.
30# Output: logs
31bbwarn() {
32 echo "WARNING: $*"
33}
34
35# Print an error to the log. Errors are non-fatal in that the build can
36# continue, but they do indicate a build failure.
37# Output: logs
38bberror() {
39 echo "ERROR: $*"
40}
41
42# Print a fatal error to the log. Fatal errors indicate build failure
43# and halt the build, exiting with an error code.
44# Output: logs
45bbfatal() {
46 echo "ERROR: $*"
47 exit 1
48}
49
50# Print debug messages. These are appropriate for progress checkpoint
51# messages to the logs. Depending on the debug log level, they may also
52# go to the console.
53# Output: logs console
54# Usage: bbdebug 1 "first level debug message"
55# bbdebug 2 "second level debug message"
56# NOTE: console output is not currently implemented.
57bbdebug() {
58 USAGE='Usage: bbdebug [123] "message"'
59 if [ $# -lt 2 ]; then
60 bbfatal "$USAGE"
61 fi
62
63 # Strip off the debug level and ensure it is an integer
64 DBGLVL=$1; shift
65 case "$DBGLVL" in
66 ''|*[!0-9]*) bbfatal "$USAGE" ;;
67 esac
68
69 # All debug output is printed to the logs
70 echo "DEBUG: $*"
71}
72
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
new file mode 100644
index 0000000000..5e6890238b
--- /dev/null
+++ b/meta/classes/meta.bbclass
@@ -0,0 +1,4 @@
1
2PACKAGES = ""
3
4do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
new file mode 100644
index 0000000000..237e61821d
--- /dev/null
+++ b/meta/classes/metadata_scm.bbclass
@@ -0,0 +1,82 @@
1METADATA_BRANCH ?= "${@base_detect_branch(d)}"
2METADATA_REVISION ?= "${@base_detect_revision(d)}"
3
4def base_detect_revision(d):
5 path = base_get_scmbasepath(d)
6
7 scms = [base_get_metadata_git_revision, \
8 base_get_metadata_svn_revision]
9
10 for scm in scms:
11 rev = scm(path, d)
12 if rev != "<unknown>":
13 return rev
14
15 return "<unknown>"
16
17def base_detect_branch(d):
18 path = base_get_scmbasepath(d)
19
20 scms = [base_get_metadata_git_branch]
21
22 for scm in scms:
23 rev = scm(path, d)
24 if rev != "<unknown>":
25 return rev.strip()
26
27 return "<unknown>"
28
29def base_get_scmbasepath(d):
30 return d.getVar( 'COREBASE', True)
31
32def base_get_metadata_monotone_branch(path, d):
33 monotone_branch = "<unknown>"
34 try:
35 with open("%s/_MTN/options" % path) as f:
36 monotone_branch = f.read().strip()
37 if monotone_branch.startswith( "database" ):
38 monotone_branch_words = monotone_branch.split()
39 monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
40 except IOError:
41 pass
42 return monotone_branch
43
44def base_get_metadata_monotone_revision(path, d):
45 monotone_revision = "<unknown>"
46 try:
47 with open("%s/_MTN/revision" % path) as f:
48 monotone_revision = f.read().strip()
49 if monotone_revision.startswith( "format_version" ):
50 monotone_revision_words = monotone_revision.split()
51 monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
52 except IOError:
53 pass
54 return monotone_revision
55
56def base_get_metadata_svn_revision(path, d):
57 # This only works with older subversion. For newer versions
58 # this function will need to be fixed by someone interested
59 revision = "<unknown>"
60 try:
61 with open("%s/.svn/entries" % path) as f:
62 revision = f.readlines()[3].strip()
63 except (IOError, IndexError):
64 pass
65 return revision
66
67def base_get_metadata_git_branch(path, d):
68 branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read()
69
70 if len(branch) != 0:
71 return branch
72 return "<unknown>"
73
74def base_get_metadata_git_revision(path, d):
75 f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path)
76 data = f.read()
77 if f.close() is None:
78 rev = data.split(" ")[0]
79 if len(rev) != 0:
80 return rev
81 return "<unknown>"
82
diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass
new file mode 100644
index 0000000000..aa0df8bb76
--- /dev/null
+++ b/meta/classes/migrate_localcount.bbclass
@@ -0,0 +1,46 @@
1PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
2LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
3
4python migrate_localcount_handler () {
5 import bb.event
6 if not e.data:
7 return
8
9 pv = e.data.getVar('PV', True)
10 if not 'AUTOINC' in pv:
11 return
12
13 localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
14 pn = e.data.getVar('PN', True)
15 revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
16 counts = localcounts.get_by_pattern('%%-%s_count' % pn)
17 if not revs or not counts:
18 return
19
20 if len(revs) != len(counts):
21 bb.warn("The numbers of revs and localcounts don't match in %s" % pn)
22 return
23
24 version = e.data.getVar('PRAUTOINX', True)
25 srcrev = bb.fetch2.get_srcrev(e.data)
26 base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
27 pkgarch = e.data.getVar('PACKAGE_ARCH', True)
28 value = max(int(count) for count in counts)
29
30 if len(revs) == 1:
31 if srcrev != ('AUTOINC+%s' % revs[0]):
32 value += 1
33 else:
34 value += 1
35
36 bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR', True))
37 df = e.data.getVar('LOCALCOUNT_DUMPFILE', True)
38 flock = bb.utils.lockfile("%s.lock" % df)
39 with open(df, 'a') as fd:
40 fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
41 (base_ver, pkgarch, srcrev, str(value)))
42 bb.utils.unlockfile(flock)
43}
44
45addhandler migrate_localcount_handler
46migrate_localcount_handler[eventmask] = "bb.event.RecipeParsed"
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
new file mode 100644
index 0000000000..721c73fcff
--- /dev/null
+++ b/meta/classes/mime.bbclass
@@ -0,0 +1,56 @@
1DEPENDS += "shared-mime-info-native shared-mime-info"
2
3mime_postinst() {
4if [ "$1" = configure ]; then
5 UPDATEMIMEDB=`which update-mime-database`
6 if [ -x "$UPDATEMIMEDB" ] ; then
7 echo "Updating MIME database... this may take a while."
8 $UPDATEMIMEDB $D${datadir}/mime
9 else
10 echo "Missing update-mime-database, update of mime database failed!"
11 exit 1
12 fi
13fi
14}
15
16mime_postrm() {
17if [ "$1" = remove ] || [ "$1" = upgrade ]; then
18 UPDATEMIMEDB=`which update-mime-database`
19 if [ -x "$UPDATEMIMEDB" ] ; then
20 echo "Updating MIME database... this may take a while."
21 $UPDATEMIMEDB $D${datadir}/mime
22 else
23 echo "Missing update-mime-database, update of mime database failed!"
24 exit 1
25 fi
26fi
27}
28
29python populate_packages_append () {
30 import re
31 packages = d.getVar('PACKAGES', True).split()
32 pkgdest = d.getVar('PKGDEST', True)
33
34 for pkg in packages:
35 mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
36 mimes = []
37 mime_re = re.compile(".*\.xml$")
38 if os.path.exists(mime_dir):
39 for f in os.listdir(mime_dir):
40 if mime_re.match(f):
41 mimes.append(f)
42 if mimes:
43 bb.note("adding mime postinst and postrm scripts to %s" % pkg)
44 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
45 if not postinst:
46 postinst = '#!/bin/sh\n'
47 postinst += d.getVar('mime_postinst', True)
48 d.setVar('pkg_postinst_%s' % pkg, postinst)
49 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
50 if not postrm:
51 postrm = '#!/bin/sh\n'
52 postrm += d.getVar('mime_postrm', True)
53 d.setVar('pkg_postrm_%s' % pkg, postrm)
54 bb.note("adding shared-mime-info-data dependency to %s" % pkg)
55 d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
56}
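
The append-or-create pattern used for pkg_postinst/pkg_postrm above can be sketched standalone (a plain dict stands in for the datastore, and the script body is a stand-in):

scripts = {}  # stands in for the datastore d

def append_script(pkg, body):
    key = 'pkg_postinst_%s' % pkg
    postinst = scripts.get(key)
    if not postinst:
        postinst = '#!/bin/sh\n'   # create with a shebang if nothing was set
    postinst += body
    scripts[key] = postinst

append_script("foo", "update-mime-database $D/usr/share/mime\n")
print(scripts['pkg_postinst_foo'])
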
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
new file mode 100644
index 0000000000..57fb90df5e
--- /dev/null
+++ b/meta/classes/mirrors.bbclass
@@ -0,0 +1,82 @@
1MIRRORS += "\
2${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
3${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
4${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
5${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
6${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
7${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
8${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
9${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
10${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
11${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
12${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
13${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
14${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
15${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
16${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
17${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
18${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
19${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
20${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
21${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
22${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
23${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
24ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
25ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
26ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
27ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
28ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
29ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
30ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
31ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/ \n \
32ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/ \n \
33ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/ \n \
34http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
35http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
36ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
37ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
38ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
39ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
40ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
41ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
42ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
43ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
44ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
45ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
46http://www.apache.org/dist http://archive.apache.org/dist \n \
47http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
48${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
49${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
50cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
51svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
52git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
53hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
54bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
55svk://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
56p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
57osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
58https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
59ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
60cvs://.*/.* http://sources.openembedded.org/ \n \
61svn://.*/.* http://sources.openembedded.org/ \n \
62git://.*/.* http://sources.openembedded.org/ \n \
63hg://.*/.* http://sources.openembedded.org/ \n \
64bzr://.*/.* http://sources.openembedded.org/ \n \
65svk://.*/.* http://sources.openembedded.org/ \n \
66p4://.*/.* http://sources.openembedded.org/ \n \
67osc://.*/.* http://sources.openembedded.org/ \n \
68https?$://.*/.* http://sources.openembedded.org/ \n \
69ftp://.*/.* http://sources.openembedded.org/ \n \
70${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
71${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
72cvs://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
73svn://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
74git://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
75hg://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
76bzr://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
77svk://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
78p4://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
79osc://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
80https?$://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
81ftp://.*/.* http://linux.enea.com/${DISTRO_VERSION}/sources/ \n \
82"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
new file mode 100644
index 0000000000..9537ba9f43
--- /dev/null
+++ b/meta/classes/module-base.bbclass
@@ -0,0 +1,18 @@
1inherit kernel-arch
2
3export OS = "${TARGET_OS}"
4export CROSS_COMPILE = "${TARGET_PREFIX}"
5
6export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
7KERNEL_OBJECT_SUFFIX = ".ko"
8
9# kernel modules are generally machine specific
10PACKAGE_ARCH = "${MACHINE_ARCH}"
11
12# Function to ensure the kernel scripts are created. Expected to
13# be called before do_compile. See module.bbclass for an example.
14do_make_scripts() {
15 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
16 make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
17 -C ${STAGING_KERNEL_DIR} scripts
18}
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
new file mode 100644
index 0000000000..ad6f7af1bb
--- /dev/null
+++ b/meta/classes/module.bbclass
@@ -0,0 +1,32 @@
1DEPENDS += "virtual/kernel"
2
3inherit module-base kernel-module-split
4
5addtask make_scripts after do_patch before do_compile
6do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
7do_make_scripts[deptask] = "do_populate_sysroot"
8
9module_do_compile() {
10 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
11 oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
12 KERNEL_SRC=${STAGING_KERNEL_DIR} \
13 KERNEL_VERSION=${KERNEL_VERSION} \
14 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
15 AR="${KERNEL_AR}" \
16 ${MAKE_TARGETS}
17}
18
19module_do_install() {
20 unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
21 oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
22 KERNEL_SRC=${STAGING_KERNEL_DIR} \
23 CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
24 modules_install
25}
26
27EXPORT_FUNCTIONS do_compile do_install
28
29# add all split modules to PN RDEPENDS; PN can be empty now
30KERNEL_MODULES_META_PACKAGE = "${PN}"
31FILES_${PN} = ""
32ALLOW_EMPTY_${PN} = "1"
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
new file mode 100644
index 0000000000..eea2fd59a1
--- /dev/null
+++ b/meta/classes/multilib.bbclass
@@ -0,0 +1,145 @@
1python multilib_virtclass_handler () {
2 cls = e.data.getVar("BBEXTENDCURR", True)
3 variant = e.data.getVar("BBEXTENDVARIANT", True)
4 if cls != "multilib" or not variant:
5 return
6
7 e.data.setVar('STAGING_KERNEL_DIR', e.data.getVar('STAGING_KERNEL_DIR', True))
8
9 # There should only be one kernel in multilib configs
10 # We also skip multilib setup for module packages.
11 provides = (e.data.getVar("PROVIDES", True) or "").split()
12 if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data):
13 raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
14
15 save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
16 for name in save_var_name.split():
17 val=e.data.getVar(name, True)
18 if val:
19 e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
20
21 if bb.data.inherits_class('image', e.data):
22 e.data.setVar("MLPREFIX", variant + "-")
23 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
24 target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
25 if target_vendor:
26 e.data.setVar("TARGET_VENDOR", target_vendor)
27 return
28
29 if bb.data.inherits_class('cross-canadian', e.data):
30 e.data.setVar("MLPREFIX", variant + "-")
31 override = ":virtclass-multilib-" + variant
32 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
33 bb.data.update_data(e.data)
34 return
35
36 if bb.data.inherits_class('native', e.data):
37 raise bb.parse.SkipPackage("We can't extend native recipes")
38
39 if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
40 raise bb.parse.SkipPackage("We can't extend nativesdk recipes")
41
42 if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
43 raise bb.parse.SkipPackage("Don't extend allarch recipes which are not packagegroups")
44
45
46 # Expand this since this won't work correctly once we set a multilib into place
47 e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
48
49 override = ":virtclass-multilib-" + variant
50
51 e.data.setVar("MLPREFIX", variant + "-")
52 e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
53 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
54
55 # Expand the WHITELISTs with multilib prefix
56 for whitelist in ["HOSTTOOLS_WHITELIST_GPL-3.0", "WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
57 pkgs = e.data.getVar(whitelist, True)
58 for pkg in pkgs.split():
59 pkgs += " " + variant + "-" + pkg
60 e.data.setVar(whitelist, pkgs)
61
62 # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
63 newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
64 if newtune:
65 e.data.setVar("DEFAULTTUNE", newtune)
66 e.data.setVar('DEFAULTTUNE_ML_%s' % variant, newtune)
67}
68
69addhandler multilib_virtclass_handler
70multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
71
72STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
73
74python __anonymous () {
75 variant = d.getVar("BBEXTENDVARIANT", True)
76
77 import oe.classextend
78
79 clsextend = oe.classextend.ClassExtender(variant, d)
80
81 if bb.data.inherits_class('image', d):
82 clsextend.map_depends_variable("PACKAGE_INSTALL")
83 clsextend.map_depends_variable("LINGUAS_INSTALL")
84 clsextend.map_depends_variable("RDEPENDS")
85 pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
86 d.setVar("PACKAGE_INSTALL", pinstall)
87 d.setVar("LINGUAS_INSTALL", "")
88 # FIXME, we need to map this to something, not delete it!
89 d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
90
91 if bb.data.inherits_class('populate_sdk_base', d):
92 clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK")
93 clsextend.map_depends_variable("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY")
94
95 if bb.data.inherits_class('image', d):
96 return
97
98 clsextend.map_depends_variable("DEPENDS")
99 clsextend.map_variable("PROVIDES")
100
101 if bb.data.inherits_class('cross-canadian', d):
102 return
103
104 clsextend.rename_packages()
105 clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
106
107 clsextend.map_packagevars()
108 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
109 clsextend.map_variable("PACKAGE_INSTALL")
110 clsextend.map_variable("INITSCRIPT_PACKAGES")
111 clsextend.map_variable("USERADD_PACKAGES")
112 clsextend.map_variable("SYSTEMD_PACKAGES")
113}
114
115PACKAGEFUNCS_append = " do_package_qa_multilib"
116
117python do_package_qa_multilib() {
118
119 def check_mlprefix(pkg, var, mlprefix):
120 values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
121 candidates = []
122 for i in values:
123 if i.startswith('virtual/'):
124 i = i[len('virtual/'):]
125 if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
126 (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
127 (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')):
128 candidates.append(i)
129 if len(candidates) > 0:
130 bb.warn("Multilib QA Issue: %s package %s - suspicious values '%s' in %s"
131 % (d.getVar('PN', True), pkg, ' '.join(candidates), var))
132
133 ml = d.getVar('MLPREFIX', True)
134 if not ml:
135 return
136
137 packages = d.getVar('PACKAGES', True)
138 for pkg in packages.split():
139 check_mlprefix(pkg, 'RDEPENDS', ml)
140 check_mlprefix(pkg, 'RPROVIDES', ml)
141 check_mlprefix(pkg, 'RRECOMMENDS', ml)
142 check_mlprefix(pkg, 'RSUGGESTS', ml)
143 check_mlprefix(pkg, 'RREPLACES', ml)
144 check_mlprefix(pkg, 'RCONFLICTS', ml)
145}
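
To illustrate what do_package_qa_multilib flags, here is a minimal standalone sketch of the check (explode_deps is simplified to dropping version constraints, which only approximates bb.utils.explode_deps):

import re

def explode_deps(s):
    # Simplified: strip "(>= x.y)" style constraints, then split.
    return re.sub(r"\(.*?\)", "", s).split()

def suspicious(values, mlprefix):
    out = []
    for i in values:
        if i.startswith('virtual/'):
            i = i[len('virtual/'):]
        if not (i.startswith('kernel-module') or i.startswith(mlprefix)
                or 'cross-canadian' in i or i.startswith('nativesdk-')
                or i.startswith('rtld') or i.startswith('kernel-vmlinux')):
            out.append(i)
    return out

print(suspicious(explode_deps("lib32-glibc bash (>= 4.0)"), "lib32-"))
# -> ['bash']: an unprefixed dependency in a lib32 package gets flagged
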
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
new file mode 100644
index 0000000000..8ea2a5a4b8
--- /dev/null
+++ b/meta/classes/multilib_global.bbclass
@@ -0,0 +1,158 @@
1def preferred_ml_updates(d):
2 # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
3 # we need to mirror these variables in the multilib case;
4 multilibs = d.getVar('MULTILIBS', True) or ""
5 if not multilibs:
6 return
7
8 prefixes = []
9 for ext in multilibs.split():
10 eext = ext.split(':')
11 if len(eext) > 1 and eext[0] == 'multilib':
12 prefixes.append(eext[1])
13
14 versions = []
15 providers = []
16 for v in d.keys():
17 if v.startswith("PREFERRED_VERSION_"):
18 versions.append(v)
19 if v.startswith("PREFERRED_PROVIDER_"):
20 providers.append(v)
21
22 for v in versions:
23 val = d.getVar(v, False)
24 pkg = v.replace("PREFERRED_VERSION_", "")
25 if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
26 continue
27 if '-cross-' in pkg and '${' in pkg:
28 for p in prefixes:
29 localdata = bb.data.createCopy(d)
30 override = ":virtclass-multilib-" + p
31 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
32 bb.data.update_data(localdata)
33 newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
34 if newname != v:
35 newval = localdata.expand(val)
36 d.setVar(newname, newval)
37 # Avoid future variable key expansion
38 vexp = d.expand(v)
39 if v != vexp and d.getVar(v, False):
40 d.renameVar(v, vexp)
41 continue
42 for p in prefixes:
43 newname = "PREFERRED_VERSION_" + p + "-" + pkg
44 if not d.getVar(newname, False):
45 d.setVar(newname, val)
46
47 for prov in providers:
48 val = d.getVar(prov, False)
49 pkg = prov.replace("PREFERRED_PROVIDER_", "")
50 if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
51 continue
52 if 'cross-canadian' in pkg:
53 for p in prefixes:
54 localdata = bb.data.createCopy(d)
55 override = ":virtclass-multilib-" + p
56 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
57 bb.data.update_data(localdata)
58 newname = localdata.expand(prov)
59 if newname != prov:
60 newval = localdata.expand(val)
61 d.setVar(newname, newval)
62 # Avoid future variable key expansion
63 provexp = d.expand(prov)
64 if prov != provexp and d.getVar(prov, False):
65 d.renameVar(prov, provexp)
66 continue
67 virt = ""
68 if pkg.startswith("virtual/"):
69 pkg = pkg.replace("virtual/", "")
70 virt = "virtual/"
71 for p in prefixes:
72 if pkg != "kernel":
73 newval = p + "-" + val
74
75 # implement variable keys
76 localdata = bb.data.createCopy(d)
77 override = ":virtclass-multilib-" + p
78 localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
79 bb.data.update_data(localdata)
80 newname = localdata.expand(prov)
81 if newname != prov and not d.getVar(newname, False):
82 d.setVar(newname, localdata.expand(newval))
83
84 # implement alternative multilib name
85 newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
86 if not d.getVar(newname, False):
87 d.setVar(newname, newval)
88 # Avoid future variable key expansion
89 provexp = d.expand(prov)
90 if prov != provexp and d.getVar(prov, False):
91 d.renameVar(prov, provexp)
92
93
94 mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
95 extramp = []
96 for p in mp:
97 if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
98 continue
99 virt = ""
100 if p.startswith("virtual/"):
101 p = p.replace("virtual/", "")
102 virt = "virtual/"
103 for pref in prefixes:
104 extramp.append(virt + pref + "-" + p)
105 d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
106
107python multilib_virtclass_handler_vendor () {
108 if isinstance(e, bb.event.ConfigParsed):
109 for v in e.data.getVar("MULTILIB_VARIANTS", True).split():
110 if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
111 e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
112 preferred_ml_updates(e.data)
113}
114addhandler multilib_virtclass_handler_vendor
115multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
116
117python multilib_virtclass_handler_global () {
118 if not e.data:
119 return
120
121 variant = e.data.getVar("BBEXTENDVARIANT", True)
122
123 if isinstance(e, bb.event.RecipeParsed) and not variant:
124 if bb.data.inherits_class('kernel', e.data) or \
125 bb.data.inherits_class('module-base', e.data) or \
126 (bb.data.inherits_class('allarch', e.data) and\
127 not bb.data.inherits_class('packagegroup', e.data)):
128 variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
129
130 import oe.classextend
131 clsextends = []
132 for variant in variants:
133 clsextends.append(oe.classextend.ClassExtender(variant, e.data))
134
135 # Process PROVIDES
136 origprovs = provs = e.data.getVar("PROVIDES", True) or ""
137 for clsextend in clsextends:
138 provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
139 e.data.setVar("PROVIDES", provs)
140
141 # Process RPROVIDES
142 origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
143 for clsextend in clsextends:
144 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
145 e.data.setVar("RPROVIDES", rprovs)
146
147 # Process RPROVIDES_${PN}...
148 for pkg in (e.data.getVar("PACKAGES", True) or "").split():
149 origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
150 for clsextend in clsextends:
151 rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
152 rprovs = rprovs + " " + clsextend.extname + "-" + pkg
153 e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
154}
155
156addhandler multilib_virtclass_handler_global
157multilib_virtclass_handler_global[eventmask] = "bb.event.RecipePreFinalise bb.event.RecipeParsed"
158
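
The effect of preferred_ml_updates() on the simple (non-virtual, non-kernel) case can be sketched with plain dicts (this ignores the virtual/, kernel, -native and cross-canadian special cases handled above):

def mirror_preferred(vars, prefixes):
    # For each PREFERRED_PROVIDER_foo = "bar", add
    # PREFERRED_PROVIDER_<prefix>-foo = "<prefix>-bar" where missing.
    out = dict(vars)
    for key, val in vars.items():
        if not key.startswith("PREFERRED_PROVIDER_"):
            continue
        pkg = key[len("PREFERRED_PROVIDER_"):]
        for p in prefixes:
            out.setdefault("PREFERRED_PROVIDER_%s-%s" % (p, pkg),
                           "%s-%s" % (p, val))
    return out

print(mirror_preferred({"PREFERRED_PROVIDER_jpeg": "libjpeg-turbo"}, ["lib32"]))
# -> adds PREFERRED_PROVIDER_lib32-jpeg = "lib32-libjpeg-turbo"
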
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
new file mode 100644
index 0000000000..5ee0a2d562
--- /dev/null
+++ b/meta/classes/multilib_header.bbclass
@@ -0,0 +1,54 @@
1inherit siteinfo
2
3# If applicable on the architecture, this routine will rename the header and
4# add a unique identifier to the name for the ABI/bitsize that is being used.
5# A wrapper will be generated for the architecture that knows how to call
6# all of the ABI variants for that given architecture.
7#
8oe_multilib_header() {
9
10 case ${HOST_OS} in
11 *-musl*)
12 return
13 ;;
14 *)
15 esac
16 # We pick an identifier per ABI/bitsize:
17 # For ARM: We don't support multilib builds.
18 # For MIPS: "n32" is a special case, which needs to be
19 # distinct from both 64-bit and 32-bit.
20 case ${TARGET_ARCH} in
21 arm*) return
22 ;;
23 mips*) case "${MIPSPKGSFX_ABI}" in
24 "-n32")
25 ident=n32
26 ;;
27 *)
28 ident=${SITEINFO_BITS}
29 ;;
30 esac
31 ;;
32 *) ident=${SITEINFO_BITS}
33 esac
34 if echo ${TARGET_ARCH} | grep -q arm; then
35 return
36 fi
37 for each_header in "$@" ; do
38 if [ ! -f "${D}/${includedir}/$each_header" ]; then
39 bberror "oe_multilib_header: Unable to find header $each_header."
40 continue
41 fi
42 stem=$(echo $each_header | sed 's#\.h$##')
43 # if mips64/n32 set ident to n32
44 mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
45
46 sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
47 done
48}
49
50# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
51# We don't need multilib headers for native builds so brute force things.
52oe_multilib_header_class-native () {
53 return
54}
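
The renaming done by oe_multilib_header can be sketched in a few lines (file names and bit sizes are examples; the real function also writes the wrapper from scripts/multilib_header_wrapper.h):

def multilib_header_name(header, bits, mips_abi=""):
    # foo.h -> foo-32.h / foo-64.h / foo-n32.h, matching the mv above.
    ident = "n32" if mips_abi == "-n32" else bits
    stem = header[:-2] if header.endswith(".h") else header
    return "%s-%s.h" % (stem, ident)

print(multilib_header_name("bits/wordsize.h", "64"))    # bits/wordsize-64.h
print(multilib_header_name("sgidefs.h", "64", "-n32"))  # sgidefs-n32.h
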
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
new file mode 100644
index 0000000000..dcd364b92c
--- /dev/null
+++ b/meta/classes/native.bbclass
@@ -0,0 +1,175 @@
1# We want native packages to be relocatable
2inherit relocatable
3
4# Native packages are built indirectly via dependency,
5# no need for them to be a direct target of 'world'
6EXCLUDE_FROM_WORLD = "1"
7
8PACKAGES = ""
9PACKAGES_class-native = ""
10PACKAGES_DYNAMIC = ""
11PACKAGES_DYNAMIC_class-native = ""
12PACKAGE_ARCH = "${BUILD_ARCH}"
13
14# used by cmake class
15OECMAKE_RPATH = "${libdir}"
16OECMAKE_RPATH_class-native = "${libdir}"
17
18# When this class has packaging enabled, setting
19# RPROVIDES becomes unnecessary.
20RPROVIDES = "${PN}"
21
22TARGET_ARCH = "${BUILD_ARCH}"
23TARGET_OS = "${BUILD_OS}"
24TARGET_VENDOR = "${BUILD_VENDOR}"
25TARGET_PREFIX = "${BUILD_PREFIX}"
26TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
27TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
28TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
29TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
30TARGET_CFLAGS = "${BUILD_CFLAGS}"
31TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
32TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
33TARGET_FPU = ""
34
35HOST_ARCH = "${BUILD_ARCH}"
36HOST_OS = "${BUILD_OS}"
37HOST_VENDOR = "${BUILD_VENDOR}"
38HOST_PREFIX = "${BUILD_PREFIX}"
39HOST_CC_ARCH = "${BUILD_CC_ARCH}"
40HOST_LD_ARCH = "${BUILD_LD_ARCH}"
41HOST_AS_ARCH = "${BUILD_AS_ARCH}"
42
43CPPFLAGS = "${BUILD_CPPFLAGS}"
44CFLAGS = "${BUILD_CFLAGS}"
45CXXFLAGS = "${BUILD_CFLAGS}"
46LDFLAGS = "${BUILD_LDFLAGS}"
47LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
48
49STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
50STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
51
52# native pkg doesn't need the TOOLCHAIN_OPTIONS.
53TOOLCHAIN_OPTIONS = ""
54
55DEPENDS_GETTEXT = "gettext-native"
56
57# Don't build ptest natively
58PTEST_ENABLED = "0"
59
60# Don't use site files for native builds
61export CONFIG_SITE = "${COREBASE}/meta/site/native"
62
63# set the compiler as well. It could have been set to something else
64export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
65export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
66export FC = "${CCACHE}${HOST_PREFIX}gfortran ${HOST_CC_ARCH}"
67export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
68export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
69export CCLD = "${CC}"
70export AR = "${HOST_PREFIX}ar"
71export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
72export RANLIB = "${HOST_PREFIX}ranlib"
73export STRIP = "${HOST_PREFIX}strip"
74
75# Path prefixes
76base_prefix = "${STAGING_DIR_NATIVE}"
77prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
78exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
79
80bindir = "${STAGING_BINDIR_NATIVE}"
81sbindir = "${STAGING_SBINDIR_NATIVE}"
82libdir = "${STAGING_LIBDIR_NATIVE}"
83includedir = "${STAGING_INCDIR_NATIVE}"
84sysconfdir = "${STAGING_ETCDIR_NATIVE}"
85datadir = "${STAGING_DATADIR_NATIVE}"
86
87baselib = "lib"
88
89# Libtool's default paths are correct for the native machine
90lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
91
92NATIVE_PACKAGE_PATH_SUFFIX ?= ""
93bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
94libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
95libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
96
97do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
98do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}/"
99
100# Since we actually install these in situ there is no staging prefix
101STAGING_DIR_HOST = ""
102STAGING_DIR_TARGET = ""
103PKG_CONFIG_DIR = "${libdir}/pkgconfig"
104
105EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
106PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
107PKG_CONFIG_SYSROOT_DIR = ""
108
109# we don't want libc-uclibc or libc-glibc to kick in for native recipes
110LIBCOVERRIDE = ""
111CLASSOVERRIDE = "class-native"
112MACHINEOVERRIDES = ""
113
114PATH_prepend = "${COREBASE}/scripts/native-intercept:"
115
116python native_virtclass_handler () {
117 classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
118 if "native" not in classextend:
119 return
120
121 pn = e.data.getVar("PN", True)
122 if not pn.endswith("-native"):
123 return
124
125 def map_dependencies(varname, d, suffix = ""):
126 if suffix:
127 varname = varname + "_" + suffix
128 deps = d.getVar(varname, True)
129 if not deps:
130 return
131 deps = bb.utils.explode_deps(deps)
132 newdeps = []
133 for dep in deps:
134 if "-cross-" in dep:
135 newdeps.append(dep.replace("-cross", "-native"))
136 elif not dep.endswith("-native"):
137 newdeps.append(dep + "-native")
138 else:
139 newdeps.append(dep)
140 d.setVar(varname, " ".join(newdeps))
141
142 map_dependencies("DEPENDS", e.data)
143 for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
144 map_dependencies("RDEPENDS", e.data, pkg)
145 map_dependencies("RRECOMMENDS", e.data, pkg)
146 map_dependencies("RSUGGESTS", e.data, pkg)
147 map_dependencies("RPROVIDES", e.data, pkg)
148 map_dependencies("RREPLACES", e.data, pkg)
149
150 provides = e.data.getVar("PROVIDES", True)
151 for prov in provides.split():
152 if prov.find(pn) != -1:
153 continue
154 if not prov.endswith("-native"):
155 provides = provides.replace(prov, prov + "-native")
156 e.data.setVar("PROVIDES", provides)
157
158 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
159}
160
161addhandler native_virtclass_handler
162native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
163
164deltask package
165deltask packagedata
166deltask package_qa
167deltask package_write_ipk
168deltask package_write_deb
169deltask package_write_rpm
170deltask package_write
171
172do_packagedata[stamp-extra-info] = ""
173do_populate_sysroot[stamp-extra-info] = ""
174
175USE_NLS = "no"
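
The dependency rewrite done by map_dependencies() above can be sketched standalone (a plain split() stands in for bb.utils.explode_deps, so version constraints are ignored; the dependency names are examples):

def map_to_native(deps):
    # '-cross' deps become '-native'; anything else gains a '-native' suffix.
    newdeps = []
    for dep in deps.split():
        if "-cross-" in dep:
            newdeps.append(dep.replace("-cross", "-native"))
        elif not dep.endswith("-native"):
            newdeps.append(dep + "-native")
        else:
            newdeps.append(dep)
    return " ".join(newdeps)

print(map_to_native("zlib flex-native gcc-cross-arm"))
# -> zlib-native flex-native gcc-native-arm
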
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
new file mode 100644
index 0000000000..5e78116ab8
--- /dev/null
+++ b/meta/classes/nativesdk.bbclass
@@ -0,0 +1,95 @@
1# SDK packages are built either explicitly by the user,
2# or indirectly via dependency. No need to be in 'world'.
3EXCLUDE_FROM_WORLD = "1"
4
5STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
6
7# libc for the SDK can be different from that of the target
8NATIVESDKLIBC ?= "libc-glibc"
9LIBCOVERRIDE = ":${NATIVESDKLIBC}"
10CLASSOVERRIDE = "class-nativesdk"
11MACHINEOVERRIDES = ""
12
13#
14# Update PACKAGE_ARCH and PACKAGE_ARCHS
15#
16PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
17PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
18
19#
20# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
21# binaries
22#
23DEPENDS_append = " chrpath-replacement-native"
24EXTRANATIVEPATH += "chrpath-native"
25
26STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
27STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
28
29HOST_ARCH = "${SDK_ARCH}"
30HOST_VENDOR = "${SDK_VENDOR}"
31HOST_OS = "${SDK_OS}"
32HOST_PREFIX = "${SDK_PREFIX}"
33HOST_CC_ARCH = "${SDK_CC_ARCH}"
34HOST_LD_ARCH = "${SDK_LD_ARCH}"
35HOST_AS_ARCH = "${SDK_AS_ARCH}"
36#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
37
38TARGET_ARCH = "${SDK_ARCH}"
39TARGET_VENDOR = "${SDK_VENDOR}"
40TARGET_OS = "${SDK_OS}"
41TARGET_PREFIX = "${SDK_PREFIX}"
42TARGET_CC_ARCH = "${SDK_CC_ARCH}"
43TARGET_LD_ARCH = "${SDK_LD_ARCH}"
44TARGET_AS_ARCH = "${SDK_AS_ARCH}"
45TARGET_FPU = ""
46EXTRA_OECONF_GCC_FLOAT = ""
47
48CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
49CFLAGS = "${BUILDSDK_CFLAGS}"
50CXXFLAGS = "${BUILDSDK_CFLAGS}"
51LDFLAGS = "${BUILDSDK_LDFLAGS}"
52
53# Change to place files in SDKPATH
54base_prefix = "${SDKPATHNATIVE}"
55prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
56exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
57baselib = "lib"
58
59export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
60export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
61
62python nativesdk_virtclass_handler () {
63 pn = e.data.getVar("PN", True)
64 if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
65 return
66
67 e.data.setVar("MLPREFIX", "nativesdk-")
68 e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN", True).replace("-nativesdk", "").replace("nativesdk-", ""))
69 e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
70}
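# For illustration (not part of the handler itself): a recipe normally opts in
# to this class via BBCLASSEXTEND, e.g.
#   BBCLASSEXTEND = "nativesdk"
# which makes a "nativesdk-foo" variant of a recipe "foo" available, with the
# renaming above applied before the recipe is finalised.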
71
72python () {
73 pn = d.getVar("PN", True)
74 if not pn.startswith("nativesdk-"):
75 return
76
77 import oe.classextend
78
79 clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
80 clsextend.rename_packages()
81 clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
82
83 clsextend.map_depends_variable("DEPENDS")
84 clsextend.map_packagevars()
85 clsextend.map_variable("PROVIDES")
86 clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
87}
88
89addhandler nativesdk_virtclass_handler
90nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
91
92do_populate_sysroot[stamp-extra-info] = ""
93do_packagedata[stamp-extra-info] = ""
94
95USE_NLS = "${SDKUSE_NLS}"
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
new file mode 100644
index 0000000000..d00f468d9a
--- /dev/null
+++ b/meta/classes/oelint.bbclass
@@ -0,0 +1,85 @@
1addtask lint before do_fetch
2do_lint[nostamp] = "1"
3python do_lint() {
4 pkgname = d.getVar("PN", True)
5
6 ##############################
7 # Test that DESCRIPTION exists
8 #
9 description = d.getVar("DESCRIPTION")
10 if not description or description[1:10] == '{SUMMARY}':
11 bb.warn("%s: DESCRIPTION is not set" % pkgname)
12
13
14 ##############################
15 # Test that HOMEPAGE exists
16 #
17 homepage = d.getVar("HOMEPAGE")
18 if not homepage:
19 bb.warn("%s: HOMEPAGE is not set" % pkgname)
20 elif not homepage.startswith("http://") and not homepage.startswith("https://"):
21 bb.warn("%s: HOMEPAGE doesn't start with http:// or https://" % pkgname)
22
23
24 ##############################
25 # Test for valid SECTION
26 #
27 section = d.getVar("SECTION")
28 if not section:
29 bb.warn("%s: SECTION is not set" % pkgname)
30 elif not section.islower():
31 bb.warn("%s: SECTION should only use lower case" % pkgname)
32
33
34 ##############################
35 # Check that all patches have Signed-off-by and Upstream-Status
36 #
37 srcuri = d.getVar("SRC_URI").split()
38 fpaths = (d.getVar('FILESPATH', True) or '').split(':')
39
40 def findPatch(patchname):
41 for dir in fpaths:
42 patchpath = os.path.join(dir, patchname)
43 if os.path.exists(patchpath):
44 return patchpath
45
46 def findKey(path, key):
47 ret = True
48 f = open(path, 'r')
49 line = f.readline()
50 while line:
51 if line.find(key) != -1:
52 ret = False
53 line = f.readline()
54 f.close()
55 return ret
56
57 length = len("file://")
58 for item in srcuri:
59 if item.startswith("file://"):
60 item = item[length:]
61 if item.endswith(".patch") or item.endswith(".diff"):
62 path = findPatch(item)
63 if path and findKey(path, "Signed-off-by"):
64 bb.warn("%s: %s doesn't have Signed-off-by" % (pkgname, item))
65 if path and findKey(path, "Upstream-Status"):
66 bb.warn("%s: %s doesn't have Upstream-Status" % (pkgname, item))
67
68
69 ##############################
70 # Check for ${PN} or ${P} usage in SRC_URI or S
71 # Should use ${BPN} or ${BP} instead to avoid breaking multilib
72 #
73 for s in srcuri:
74 if not s.startswith("file://"):
75 if "{PN}" in s:
76 bb.warn("%s: should use BPN instead of PN in SRC_URI" % pkgname)
77 if "{P}" in s:
78 bb.warn("%s: should use BP instead of P in SRC_URI" % pkgname)
79
80 srcpath = d.getVar("S")
81 if "{PN}" in srcpath:
82 bb.warn("%s: should use BPN instead of PN in S" % pkgname)
83 if "{P}" in srcpath:
84 bb.warn("%s: should use BP instead of P in S" % pkgname)
85}
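# Usage sketch (assuming the class has been inherited, e.g. via
# INHERIT += "oelint" in local.conf): the checks can then be run for a
# single recipe with
#   bitbake <recipe> -c lint
# and any findings are reported through bb.warn() as above.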
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
new file mode 100644
index 0000000000..77bf0c1c14
--- /dev/null
+++ b/meta/classes/own-mirrors.bbclass
@@ -0,0 +1,13 @@
1PREMIRRORS() {
2cvs://.*/.* ${SOURCE_MIRROR_URL}
3svn://.*/.* ${SOURCE_MIRROR_URL}
4git://.*/.* ${SOURCE_MIRROR_URL}
5gitsm://.*/.* ${SOURCE_MIRROR_URL}
6hg://.*/.* ${SOURCE_MIRROR_URL}
7bzr://.*/.* ${SOURCE_MIRROR_URL}
8svk://.*/.* ${SOURCE_MIRROR_URL}
9p4://.*/.* ${SOURCE_MIRROR_URL}
10osc://.*/.* ${SOURCE_MIRROR_URL}
11https?$://.*/.* ${SOURCE_MIRROR_URL}
12ftp://.*/.* ${SOURCE_MIRROR_URL}
13}
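# Usage sketch, typically in local.conf (the URL below is illustrative only):
#   INHERIT += "own-mirrors"
#   SOURCE_MIRROR_URL = "http://example.com/source-mirror"
# The fetcher will then try ${SOURCE_MIRROR_URL} for each of the schemes
# above before falling back to the upstream SRC_URI.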
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
new file mode 100644
index 0000000000..b81f4f9281
--- /dev/null
+++ b/meta/classes/package.bbclass
@@ -0,0 +1,2060 @@
1#
2# Packaging process
3#
4# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
5# taking D and splitting it up into the packages listed in PACKAGES, placing the
6# resulting output in PKGDEST.
7#
8# There are the following default steps but PACKAGEFUNCS can be extended:
9#
10# a) package_get_auto_pr - get PRAUTO from remote PR service
11#
12# b) perform_packagecopy - Copy D into PKGD
13#
14# c) package_do_split_locales - Split out the locale files, updating FILES and PACKAGES
15#
16# d) split_and_strip_files - split the files into runtime and debug and strip them.
17# Debug files include the split-out debug info and associated sources that end up in -dbg packages
18#
19# e) fixup_perms - Fix up permissions in the package before we split it.
20#
21# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
22# Also triggers the binary stripping code to put files in -dbg packages.
23#
24# g) package_do_filedeps - Collect perfile run-time dependency metadata
25# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
26# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
27#
28# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
29# dependencies found. Also stores the package name so anyone else using this library
30# knows which package to depend on.
31#
32# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
33#
34# j) read_shlibdeps - Reads the stored shlibs information into the metadata
35#
36# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
37#
38# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
39# packaging steps
40
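# For illustration, PACKAGEFUNCS can be extended from another class or recipe
# with a custom step (the function name below is hypothetical):
#
#   PACKAGEFUNCS += "my_extra_packagefunc"
#   python my_extra_packagefunc () {
#       bb.note("%s split into: %s" % (d.getVar('PN', True),
#                                      d.getVar('PACKAGES', True)))
#   }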
41inherit packagedata
42inherit prserv
43inherit chrpath
44
45# Need the package_qa_handle_error() in insane.bbclass
46inherit insane
47
48PKGD = "${WORKDIR}/package"
49PKGDEST = "${WORKDIR}/packages-split"
50
51LOCALE_SECTION ?= ''
52
53ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
54
55# rpm is used for the per-file dependency identification
56PACKAGE_DEPENDS += "rpm-native"
57
58def legitimize_package_name(s):
59 """
60 Make sure package names are legitimate strings
61 """
62 import re
63
64 def fixutf(m):
65 cp = m.group(1)
66 if cp:
67 return ('\u%s' % cp.zfill(4)).decode('unicode_escape').encode('utf-8')
68
69 # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
70 s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
71
72 # Remaining package name validity fixes
73 return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
74
75def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
76 """
77 Used in .bb files to split up dynamically generated subpackages of a
78 given package, usually plugins or modules.
79
80 Arguments:
81 root -- the path in which to search
82 file_regex -- regular expression to match searched files. Use
83 parentheses () to mark the part of this expression
84 that should be used to derive the module name (to be
85 substituted where %s is used in other function
86 arguments as noted below)
87 output_pattern -- pattern to use for the package names. Must include %s.
88 description -- description to set for each package. Must include %s.
89 postinst -- postinstall script to use for all packages (as a
90 string)
91 recursive -- True to perform a recursive search - default False
92 hook -- a hook function to be called for every match. The
93 function will be called with the following arguments
94 (in the order listed):
95 f: full path to the file/directory match
96 pkg: the package name
97 file_regex: as above
98 output_pattern: as above
99 modulename: the module name derived using file_regex
100 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
101 all packages. The default value of None causes a
102 dependency on the main package (${PN}) - if you do
103 not want this, pass '' for this parameter.
104 aux_files_pattern -- extra item(s) to be added to FILES for each
105 package. Can be a single string item or a list of
106 strings for multiple items. Must include %s.
107 postrm -- postrm script to use for all packages (as a string)
108 allow_dirs -- True allow directories to be matched - default False
109 prepend -- if True, prepend created packages to PACKAGES instead
110 of the default False which appends them
111 match_path -- match file_regex on the whole relative path to the
112 root rather than just the file name
113 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
114 each package, using the actual derived module name
115 rather than converting it to something legal for a
116 package name. Can be a single string item or a list
117 of strings for multiple items. Must include %s.
118 allow_links -- True to allow symlinks to be matched - default False
119 summary -- Summary to set for each package. Must include %s;
120 defaults to description if not set.
121
122 """
123
124 dvar = d.getVar('PKGD', True)
125
126 # If the root directory doesn't exist, don't error out later but silently do
127 # no splitting.
128 if not os.path.exists(dvar + root):
129 return []
130
131 ml = d.getVar("MLPREFIX", True)
132 if ml:
133 if not output_pattern.startswith(ml):
134 output_pattern = ml + output_pattern
135
136 newdeps = []
137 for dep in (extra_depends or "").split():
138 if dep.startswith(ml):
139 newdeps.append(dep)
140 else:
141 newdeps.append(ml + dep)
142 if newdeps:
143 extra_depends = " ".join(newdeps)
144
145
146 packages = d.getVar('PACKAGES', True).split()
147 split_packages = []
148
149 if postinst:
150 postinst = '#!/bin/sh\n' + postinst + '\n'
151 if postrm:
152 postrm = '#!/bin/sh\n' + postrm + '\n'
153 if not recursive:
154 objs = os.listdir(dvar + root)
155 else:
156 objs = []
157 for walkroot, dirs, files in os.walk(dvar + root):
158 for file in files:
159 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
160 if relpath:
161 objs.append(relpath)
162
163 if extra_depends is None:
164 extra_depends = d.getVar("PN", True)
165
166 if not summary:
167 summary = description
168
169 import re, stat
170 for o in sorted(objs):
171 if match_path:
172 m = re.match(file_regex, o)
173 else:
174 m = re.match(file_regex, os.path.basename(o))
175
176 if not m:
177 continue
178 f = os.path.join(dvar + root, o)
179 mode = os.lstat(f).st_mode
180 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
181 continue
182 on = legitimize_package_name(m.group(1))
183 pkg = output_pattern % on
184 split_packages.append(pkg)
185 if pkg not in packages:
186 if prepend:
187 packages = [pkg] + packages
188 else:
189 packages.append(pkg)
190 oldfiles = d.getVar('FILES_' + pkg, True)
191 newfile = os.path.join(root, o)
192 # These names will be passed through glob() so if the filename actually
193 # contains * or ? (rare, but possible) we need to handle that specially
194 newfile = newfile.replace('*', '[*]')
195 newfile = newfile.replace('?', '[?]')
196 if not oldfiles:
197 the_files = [newfile]
198 if aux_files_pattern:
199 if type(aux_files_pattern) is list:
200 for fp in aux_files_pattern:
201 the_files.append(fp % on)
202 else:
203 the_files.append(aux_files_pattern % on)
204 if aux_files_pattern_verbatim:
205 if type(aux_files_pattern_verbatim) is list:
206 for fp in aux_files_pattern_verbatim:
207 the_files.append(fp % m.group(1))
208 else:
209 the_files.append(aux_files_pattern_verbatim % m.group(1))
210 d.setVar('FILES_' + pkg, " ".join(the_files))
211 else:
212 d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
213 if extra_depends != '':
214 d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
215 if not d.getVar('DESCRIPTION_' + pkg, True):
216 d.setVar('DESCRIPTION_' + pkg, description % on)
217 if not d.getVar('SUMMARY_' + pkg, True):
218 d.setVar('SUMMARY_' + pkg, summary % on)
219 if postinst:
220 d.setVar('pkg_postinst_' + pkg, postinst)
221 if postrm:
222 d.setVar('pkg_postrm_' + pkg, postrm)
223 if callable(hook):
224 hook(f, pkg, file_regex, output_pattern, m.group(1))
225
226 d.setVar('PACKAGES', ' '.join(packages))
227 return split_packages
228
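# Typical use of do_split_packages(), e.g. from a recipe's
# populate_packages_prepend (the paths and names below are illustrative):
#
#   python populate_packages_prepend () {
#       plugindir = d.expand('${libdir}/myapp/plugins')
#       do_split_packages(d, plugindir, '^lib(.*)\.so$', 'myapp-plugin-%s',
#                         'MyApp plugin for %s', extra_depends='')
#   }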
229PACKAGE_DEPENDS += "file-native"
230
231python () {
232 if d.getVar('PACKAGES', True) != '':
233 deps = ""
234 for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
235 deps += " %s:do_populate_sysroot" % dep
236 d.appendVarFlag('do_package', 'depends', deps)
237
238 # shlibs requires any DEPENDS to have already been packaged for the *.list files
239 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
240}
241
242def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
243 # Function to split a single file into two components: one is the stripped
244 # target system binary, the other contains any debugging information. The
245 # two files are linked to reference each other.
246 #
247 # sourcefile is also generated containing a list of debugsources
248
249 import stat
250
251 dvar = d.getVar('PKGD', True)
252 objcopy = d.getVar("OBJCOPY", True)
253 debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
254 workdir = d.getVar("WORKDIR", True)
255 workparentdir = d.getVar("DEBUGSRC_OVERRIDE_PATH", True) or os.path.dirname(os.path.dirname(workdir))
256
257 # We ignore kernel modules; we don't generate debug info files for them.
258 if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
259 return 1
260
261 newmode = None
262 if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
263 origmode = os.stat(file)[stat.ST_MODE]
264 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
265 os.chmod(file, newmode)
266
267 # We need to extract the debug src information here...
268 if debugsrcdir:
269 cmd = "'%s' -b '%s' -d '%s' -i -l '%s' '%s'" % (debugedit, workparentdir, debugsrcdir, sourcefile, file)
270 (retval, output) = oe.utils.getstatusoutput(cmd)
271 if retval:
272 bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
273
274 bb.utils.mkdirhier(os.path.dirname(debugfile))
275
276 cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile)
277 (retval, output) = oe.utils.getstatusoutput(cmd)
278 if retval:
279 bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
280
281 # Set the debuglink to have the view of the file path on the target
282 cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file)
283 (retval, output) = oe.utils.getstatusoutput(cmd)
284 if retval:
285 bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
286
287 if newmode:
288 os.chmod(file, origmode)
289
290 return 0
291
292def copydebugsources(debugsrcdir, d):
293 # The debug src information written out to sourcefile is further processed
294 # and copied to the destination here.
295
296 import stat
297
298 sourcefile = d.expand("${WORKDIR}/debugsources.list")
299 if debugsrcdir and os.path.isfile(sourcefile):
300 dvar = d.getVar('PKGD', True)
301 strip = d.getVar("STRIP", True)
302 objcopy = d.getVar("OBJCOPY", True)
303 debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
304 workdir = d.getVar("WORKDIR", True)
305 workparentdir = os.path.dirname(os.path.dirname(workdir))
306 workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
307
308 nosuchdir = []
309 basepath = dvar
310 for p in debugsrcdir.split("/"):
311 basepath = basepath + "/" + p
312 if not cpath.exists(basepath):
313 nosuchdir.append(basepath)
314 bb.utils.mkdirhier(basepath)
315 cpath.updatecache(basepath)
316
317 processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
318 # We need to ignore files that are not actually ours
319 # we do this by only paying attention to items from this package
320 processdebugsrc += "fgrep -zw '%s' | "
321 processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
322
323 cmd = processdebugsrc % (sourcefile, workbasedir, workparentdir, dvar, debugsrcdir)
324 (retval, output) = oe.utils.getstatusoutput(cmd)
325 # Can "fail" if internal headers/transient sources are attempted
326 #if retval:
327 # bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd))
328
329 # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
330 # Work around this by manually finding and copying any symbolic links that made it through.
331 cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
332 (retval, output) = oe.utils.getstatusoutput(cmd)
333 if retval:
334 bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd))
335
336 # The copy by cpio may have resulted in some empty directories! Remove these
337 cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
338 (retval, output) = oe.utils.getstatusoutput(cmd)
339 if retval:
340 bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
341
342 # Also remove debugsrcdir if it's empty
343 for p in nosuchdir[::-1]:
344 if os.path.exists(p) and not os.listdir(p):
345 os.rmdir(p)
346
347#
348# Package data handling routines
349#
350
351def get_package_mapping (pkg, basepkg, d):
352 import oe.packagedata
353
354 data = oe.packagedata.read_subpkgdata(pkg, d)
355 key = "PKG_%s" % pkg
356
357 if key in data:
358 # Have to avoid undoing the write_extra_pkgs(global_variants...)
359 if bb.data.inherits_class('allarch', d) and data[key] == basepkg:
360 return pkg
361 return data[key]
362
363 return pkg
364
365def get_package_additional_metadata (pkg_type, d):
366 base_key = "PACKAGE_ADD_METADATA"
367 for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
368 if d.getVar(key) is None:
369 continue
370 d.setVarFlag(key, "type", "list")
371 if d.getVarFlag(key, "separator") is None:
372 d.setVarFlag(key, "separator", "\\n")
373 metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
374 return "\n".join(metadata_fields).strip()
375
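# For illustration (the field value is hypothetical), extra control-file
# fields can be injected per package type, e.g.:
#   PACKAGE_ADD_METADATA_IPK = "Vendor: example.com"
# Multiple fields are split on the "separator" varflag, which defaults to "\n".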
376def runtime_mapping_rename (varname, pkg, d):
377 #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
378
379 if bb.data.inherits_class('packagegroup', d):
380 return
381
382 new_depends = {}
383 deps = bb.utils.explode_dep_versions2(d.getVar(varname, True) or "")
384 for depend in deps:
385 new_depend = get_package_mapping(depend, pkg, d)
386 new_depends[new_depend] = deps[depend]
387
388 d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
389
390 #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
391
392#
393# Package functions suitable for inclusion in PACKAGEFUNCS
394#
395
396python package_get_auto_pr() {
397 import oe.prservice
398 import re
399
400 # Support per recipe PRSERV_HOST
401 pn = d.getVar('PN', True)
402 host = d.getVar("PRSERV_HOST_" + pn, True)
403 if host is not None:
404 d.setVar("PRSERV_HOST", host)
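# For illustration (the recipe name is hypothetical), a per-recipe server
# can be selected from a conf file with: PRSERV_HOST_mypkg = "localhost:0"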
405
406 pkgv = d.getVar("PKGV", True)
407
408 # PR Server not active, handle AUTOINC
409 if not d.getVar('PRSERV_HOST', True):
410 if 'AUTOINC' in pkgv:
411 d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
412 return
413
414 auto_pr = None
415 pv = d.getVar("PV", True)
416 version = d.getVar("PRAUTOINX", True)
417 pkgarch = d.getVar("PACKAGE_ARCH", True)
418 checksum = d.getVar("BB_TASKHASH", True)
419
420 if d.getVar('PRSERV_LOCKDOWN', True):
421 auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
422 if auto_pr is None:
423 bb.fatal("Can NOT get PRAUTO from lockdown exported file")
424 d.setVar('PRAUTO',str(auto_pr))
425 return
426
427 try:
428 conn = d.getVar("__PRSERV_CONN", True)
429 if conn is None:
430 conn = oe.prservice.prserv_make_conn(d)
431 if conn is not None:
432 if "AUTOINC" in pkgv:
433 srcpv = bb.fetch2.get_srcrev(d)
434 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
435 value = conn.getPR(base_ver, pkgarch, srcpv)
436 d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
437
438 auto_pr = conn.getPR(version, pkgarch, checksum)
439 except Exception as e:
440 bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
441 if auto_pr is None:
442 bb.fatal("Can NOT get PRAUTO from remote PR service")
443 d.setVar('PRAUTO',str(auto_pr))
444}
445
446LOCALEBASEPN ??= "${PN}"
447
448python package_do_split_locales() {
449 if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'):
450 bb.debug(1, "package requested not splitting locales")
451 return
452
453 packages = (d.getVar('PACKAGES', True) or "").split()
454
455 datadir = d.getVar('datadir', True)
456 if not datadir:
457 bb.note("datadir not defined")
458 return
459
460 dvar = d.getVar('PKGD', True)
461 pn = d.getVar('LOCALEBASEPN', True)
462
463 if pn + '-locale' in packages:
464 packages.remove(pn + '-locale')
465
466 localedir = os.path.join(dvar + datadir, 'locale')
467
468 if not cpath.isdir(localedir):
469 bb.debug(1, "No locale files in this package")
470 return
471
472 locales = os.listdir(localedir)
473
474 summary = d.getVar('SUMMARY', True) or pn
475 description = d.getVar('DESCRIPTION', True) or ""
476 locale_section = d.getVar('LOCALE_SECTION', True)
477 mlprefix = d.getVar('MLPREFIX', True) or ""
478 for l in sorted(locales):
479 ln = legitimize_package_name(l)
480 pkg = pn + '-locale-' + ln
481 packages.append(pkg)
482 d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
483 d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
484 d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
485 d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
486 d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
487 if locale_section:
488 d.setVar('SECTION_' + pkg, locale_section)
489
490 d.setVar('PACKAGES', ' '.join(packages))
491
492 # Disabled by RP 18/06/07
493 # Wildcards aren't supported in debian
494 # They break with ipkg since glibc-locale* will mean that
495 # glibc-localedata-translit* won't install as a dependency
496 # for some other package which breaks meta-toolchain
497 # Probably breaks since virtual-locale- isn't provided anywhere
498 #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or "").split()
499 #rdep.append('%s-locale*' % pn)
500 #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
501}
502
503python perform_packagecopy () {
504 dest = d.getVar('D', True)
505 dvar = d.getVar('PKGD', True)
506
507 # Start package population by taking a copy of the installed
508 # files to operate on
509 # Preserve sparse files and hard links
510 cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (dest, dvar)
511 (retval, output) = oe.utils.getstatusoutput(cmd)
512 if retval:
513 bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
514
515 # replace RPATHs for the nativesdk binaries, to make them relocatable
516 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
517 rpath_replace (dvar, d)
518}
519perform_packagecopy[cleandirs] = "${PKGD}"
520perform_packagecopy[dirs] = "${PKGD}"
521
522# We generate a master list of directories to process; we start by
523# seeding this list with reasonable defaults, then load from
524# the fs-perms.txt files
525python fixup_perms () {
526 import pwd, grp
527
528 # init using a string with the same format as a line as documented in
529 # the fs-perms.txt file
530 # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
531 # <path> link <link target>
532 #
533 # __str__ can be used to print out an entry in the input format
534 #
535 # if fs_perms_entry.path is None:
536 # an error occurred
537 # if fs_perms_entry.link, you can retrieve:
538 # fs_perms_entry.path = path
539 # fs_perms_entry.link = target of link
540 # if not fs_perms_entry.link, you can retrieve:
541 # fs_perms_entry.path = path
542 # fs_perms_entry.mode = expected dir mode or None
543 # fs_perms_entry.uid = expected uid or -1
544 # fs_perms_entry.gid = expected gid or -1
545 # fs_perms_entry.walk = 'true' or something else
546 # fs_perms_entry.fmode = expected file mode or None
547 # fs_perms_entry.fuid = expected file uid or -1
548 # fs_perms_entry.fgid = expected file gid or -1
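# For example, lines in the documented format might look like
# (illustrative values; real defaults come from files/fs-perms.txt):
#   /usr/src 0755 root root false - - -
#   /var/lock link /run/lock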
549 class fs_perms_entry():
550 def __init__(self, line):
551 lsplit = line.split()
552 if len(lsplit) == 3 and lsplit[1].lower() == "link":
553 self._setlink(lsplit[0], lsplit[2])
554 elif len(lsplit) == 8:
555 self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
556 else:
557 msg = "Fixup Perms: invalid config line %s" % line
558 package_qa_handle_error("perm-config", msg, d)
559 self.path = None
560 self.link = None
561
562 def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
563 self.path = os.path.normpath(path)
564 self.link = None
565 self.mode = self._procmode(mode)
566 self.uid = self._procuid(uid)
567 self.gid = self._procgid(gid)
568 self.walk = walk.lower()
569 self.fmode = self._procmode(fmode)
570 self.fuid = self._procuid(fuid)
571 self.fgid = self._procgid(fgid)
572
573 def _setlink(self, path, link):
574 self.path = os.path.normpath(path)
575 self.link = link
576
577 def _procmode(self, mode):
578 if not mode or mode == "-":
579 return None
580 else:
581 return int(mode,8)
582
583 # Note uid/gid -1 has special significance in os.lchown
584 def _procuid(self, uid):
585 if uid is None or uid == "-":
586 return -1
587 elif uid.isdigit():
588 return int(uid)
589 else:
590 return pwd.getpwnam(uid).pw_uid
591
592 def _procgid(self, gid):
593 if gid is None or gid == "-":
594 return -1
595 elif gid.isdigit():
596 return int(gid)
597 else:
598 return grp.getgrnam(gid).gr_gid
599
600 # Use for debugging the entries
601 def __str__(self):
602 if self.link:
603 return "%s link %s" % (self.path, self.link)
604 else:
605 mode = "-"
606 if self.mode:
607 mode = "0%o" % self.mode
608 fmode = "-"
609 if self.fmode:
610 fmode = "0%o" % self.fmode
611 uid = self._mapugid(self.uid)
612 gid = self._mapugid(self.gid)
613 fuid = self._mapugid(self.fuid)
614 fgid = self._mapugid(self.fgid)
615 return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
616
617 def _mapugid(self, id):
618 if id is None or id == -1:
619 return "-"
620 else:
621 return "%d" % id
622
623 # Fix the permission, owner and group of path
624 def fix_perms(path, mode, uid, gid, dir):
625 if mode and not os.path.islink(path):
626 #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
627 os.chmod(path, mode)
628 # -1 is a special value that means don't change the uid/gid
629 # if they are BOTH -1, don't bother to lchown
630 if not (uid == -1 and gid == -1):
631 #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
632 os.lchown(path, uid, gid)
633
634 # Return a list of configuration files based on either the default
635 # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
636 # paths are resolved via BBPATH
637 def get_fs_perms_list(d):
638 str = ""
639 bbpath = d.getVar('BBPATH', True)
640 fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
641 if not fs_perms_tables:
642 fs_perms_tables = 'files/fs-perms.txt'
643 for conf_file in fs_perms_tables.split():
644 str += " %s" % bb.utils.which(bbpath, conf_file)
645 return str
646
647
648
649 dvar = d.getVar('PKGD', True)
650
651 fs_perms_table = {}
652
653 # By default all of the standard directories specified in
654 # bitbake.conf will get 0755 root:root.
655 target_path_vars = [ 'base_prefix',
656 'prefix',
657 'exec_prefix',
658 'base_bindir',
659 'base_sbindir',
660 'base_libdir',
661 'datadir',
662 'sysconfdir',
663 'servicedir',
664 'sharedstatedir',
665 'localstatedir',
666 'infodir',
667 'mandir',
668 'docdir',
669 'bindir',
670 'sbindir',
671 'libexecdir',
672 'libdir',
673 'includedir',
674 'oldincludedir' ]
675
676 for path in target_path_vars:
677 dir = d.getVar(path, True) or ""
678 if dir == "":
679 continue
680 fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
681
682 # Now we actually load from the configuration files
683 for conf in get_fs_perms_list(d).split():
684 if os.path.exists(conf):
685 f = open(conf)
686 for line in f:
687 if line.startswith('#'):
688 continue
689 lsplit = line.split()
690 if len(lsplit) == 0:
691 continue
692 if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
693 msg = "Fixup perms: %s invalid line: %s" % (conf, line)
694 package_qa_handle_error("perm-line", msg, d)
695 continue
696 entry = fs_perms_entry(d.expand(line))
697 if entry and entry.path:
698 fs_perms_table[entry.path] = entry
699 f.close()
700
701 # Debug -- list out in-memory table
702 #for dir in fs_perms_table:
703 # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
704
705 # We process links first, so we can go back and fixup directory ownership
706 # for any newly created directories
707 for dir in fs_perms_table:
708 if not fs_perms_table[dir].link:
709 continue
710
711 origin = dvar + dir
712 if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
713 continue
714
715 link = fs_perms_table[dir].link
716 if link[0] == "/":
717 target = dvar + link
718 ptarget = link
719 else:
720 target = os.path.join(os.path.dirname(origin), link)
721 ptarget = os.path.join(os.path.dirname(dir), link)
722 if os.path.exists(target):
723 msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
724 package_qa_handle_error("perm-link", msg, d)
725 continue
726
727 # Create path to move directory to, move it, and then setup the symlink
728 bb.utils.mkdirhier(os.path.dirname(target))
729 #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
730 os.rename(origin, target)
731 #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
732 os.symlink(link, origin)
733
734 for dir in fs_perms_table:
735 if fs_perms_table[dir].link:
736 continue
737
738 origin = dvar + dir
739 if not (cpath.exists(origin) and cpath.isdir(origin)):
740 continue
741
742 fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
743
744 if fs_perms_table[dir].walk == 'true':
745 for root, dirs, files in os.walk(origin):
746 for dr in dirs:
747 each_dir = os.path.join(root, dr)
748 fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
749 for f in files:
750 each_file = os.path.join(root, f)
751 fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
752}
753
754python split_and_strip_files () {
755 import stat, errno
756
757 dvar = d.getVar('PKGD', True)
758 pn = d.getVar('PN', True)
759
760 # We default to '.debug' style
761 if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
762 # Single debug-file-directory style debug info
763 debugappend = ".debug"
764 debugdir = ""
765 debuglibdir = "/usr/lib/debug"
766 debugsrcdir = "/usr/src/debug"
767 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-without-src':
768 # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
769 debugappend = ""
770 debugdir = "/.debug"
771 debuglibdir = ""
772 debugsrcdir = ""
773 else:
774 # Original OE-core, a.k.a. ".debug", style debug info
775 debugappend = ""
776 debugdir = "/.debug"
777 debuglibdir = ""
778 debugsrcdir = "/usr/src/debug"
779
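# For illustration, a distro or local config can select the first layout
# above with PACKAGE_DEBUG_SPLIT_STYLE = "debug-file-directory", placing
# debug info under /usr/lib/debug rather than per-directory .debug subdirs.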
780 sourcefile = d.expand("${WORKDIR}/debugsources.list")
781 bb.utils.remove(sourcefile)
782
783 os.chdir(dvar)
784
785 # Return type (bits):
786 # 0 - not elf
787 # 1 - ELF
788 # 2 - stripped
789 # 4 - executable
790 # 8 - shared library
791 # 16 - kernel module
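    # e.g. an unstripped shared library reports 1|8 == 9 and a stripped
    # executable 1|2|4 == 7 (values follow from the bits above)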
792 def isELF(path):
793 type = 0
794 ret, result = oe.utils.getstatusoutput("file \"%s\"" % path.replace("\"", "\\\""))
795
796 if ret:
797 msg = "split_and_strip_files: 'file %s' failed" % path
798 package_qa_handle_error("split-strip", msg, d)
799 return type
800
801 # Not stripped
802 if "ELF" in result:
803 type |= 1
804 if "not stripped" not in result:
805 type |= 2
806 if "executable" in result:
807 type |= 4
808 if "shared" in result:
809 type |= 8
810 return type
811
812
813 #
814 # First let's figure out all of the files we may have to process ... do this only once!
815 #
816 elffiles = {}
817 symlinks = {}
818 kernmods = []
819 inodes = {}
820 libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir", True))
821 baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir", True))
822 if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
823 for root, dirs, files in cpath.walk(dvar):
824 for f in files:
825 file = os.path.join(root, f)
826 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
827 kernmods.append(file)
828 continue
829
830 # Skip debug files
831 if debugappend and file.endswith(debugappend):
832 continue
833 if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
834 continue
835
836 try:
837 ltarget = cpath.realpath(file, dvar, False)
838 s = cpath.lstat(ltarget)
839 except OSError as e:
840 (err, strerror) = e.args
841 if err != errno.ENOENT:
842 raise
843 # Skip broken symlinks
844 continue
845 if not s:
846 continue
847 # Check if it's executable
848 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
849 or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
850 # If it's a symlink, and points to an ELF file, we capture the readlink target
851 if cpath.islink(file):
852 target = os.readlink(file)
853 if isELF(ltarget):
854 #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
855 symlinks[file] = target
856 continue
857
858 # It's a file (or hardlink), not a link
859 # ...but is it ELF, and is it already stripped?
860 elf_file = isELF(file)
861 if elf_file & 1:
862 if elf_file & 2:
863 if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
864 bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
865 else:
866 msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
867 package_qa_handle_error("already-stripped", msg, d)
868 continue
869
870 # At this point we have an unstripped elf file. We need to:
871 # a) Make sure any file we strip is not hardlinked to anything else outside this tree
872 # b) Only strip any hardlinked file once (no races)
873 # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
874
875 # Use a reference of device ID and inode number to identify files
876 file_reference = "%d_%d" % (s.st_dev, s.st_ino)
877 if file_reference in inodes:
878 os.unlink(file)
879 os.link(inodes[file_reference][0], file)
880 inodes[file_reference].append(file)
881 else:
882 inodes[file_reference] = [file]
883 # break hardlink
884 bb.utils.copyfile(file, file)
885 elffiles[file] = elf_file
886 # Modified the file so clear the cache
887 cpath.updatecache(file)
888
889 #
890 # First let's process debug splitting
891 #
892 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
893 for file in elffiles:
894 src = file[len(dvar):]
895 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
896 fpath = dvar + dest
897
898 # Split the file...
899 bb.utils.mkdirhier(os.path.dirname(fpath))
900 #bb.note("Split %s -> %s" % (file, fpath))
901 # Only store off the hard link reference if we successfully split!
902 splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
903
904 # Hardlink our debug symbols to the other hardlink copies
905 for ref in inodes:
906 if len(inodes[ref]) == 1:
907 continue
908 for file in inodes[ref][1:]:
909 src = file[len(dvar):]
910 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
911 fpath = dvar + dest
912 target = inodes[ref][0][len(dvar):]
913 ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
914 bb.utils.mkdirhier(os.path.dirname(fpath))
915 #bb.note("Link %s -> %s" % (fpath, ftarget))
916 os.link(ftarget, fpath)
917
918 # Create symlinks for all cases we were able to split symbols
919 for file in symlinks:
920 src = file[len(dvar):]
921 dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
922 fpath = dvar + dest
923 # Skip it if the target doesn't exist
924 try:
925 s = os.stat(fpath)
926 except OSError as e:
927 (err, strerror) = e.args
928 if err != errno.ENOENT:
929 raise
930 continue
931
932 ltarget = symlinks[file]
933 lpath = os.path.dirname(ltarget)
934 lbase = os.path.basename(ltarget)
935 ftarget = ""
936 if lpath and lpath != ".":
937 ftarget += lpath + debugdir + "/"
938 ftarget += lbase + debugappend
939 if lpath.startswith(".."):
940 ftarget = os.path.join("..", ftarget)
941 bb.utils.mkdirhier(os.path.dirname(fpath))
942 #bb.note("Symlink %s -> %s" % (fpath, ftarget))
943 os.symlink(ftarget, fpath)
944
945 # Process the debugsrcdir if requested...
946 # This copies and places the referenced sources for later debugging...
947 copydebugsources(debugsrcdir, d)
948 #
949 # End of debug splitting
950 #
951
952 #
953 # Now let's go back over things and strip them
954 #
955 if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
956 strip = d.getVar("STRIP", True)
957 sfiles = []
958 for file in elffiles:
959 elf_file = int(elffiles[file])
960 #bb.note("Strip %s" % file)
961 sfiles.append((file, elf_file, strip))
962 for f in kernmods:
963 sfiles.append((f, 16, strip))
964
965 oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
966
967 #
968 # End of strip
969 #
970}
971
972python populate_packages () {
973 import glob, re
974
975 workdir = d.getVar('WORKDIR', True)
976 outdir = d.getVar('DEPLOY_DIR', True)
977 dvar = d.getVar('PKGD', True)
978 packages = d.getVar('PACKAGES', True)
979 pn = d.getVar('PN', True)
980
981 bb.utils.mkdirhier(outdir)
982 os.chdir(dvar)
983
984 # Sanity check PACKAGES for duplicates and for LICENSE_EXCLUSION
985 # This check should be moved to sanity.bbclass once we have the infrastructure
986 package_list = []
987
988 for pkg in packages.split():
989 if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
990 msg = "%s has an incompatible license. Excluding from packaging." % pkg
991 package_qa_handle_error("incompatible-license", msg, d)
992 if pkg in package_list:
993 msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
994 package_qa_handle_error("packages-list", msg, d)
995 else:
996 package_list.append(pkg)
997 d.setVar('PACKAGES', ' '.join(package_list))
998 pkgdest = d.getVar('PKGDEST', True)
999
1000 seen = []
1001
1002 # os.mkdir masks the permissions with umask so we have to unset it first
1003 oldumask = os.umask(0)
1004
1005 for pkg in package_list:
1006 root = os.path.join(pkgdest, pkg)
1007 bb.utils.mkdirhier(root)
1008
1009 filesvar = d.getVar('FILES_%s' % pkg, True) or ""
1010 if "//" in filesvar:
1011 msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
1012 package_qa_handle_error("files-invalid", msg, d)
1013 filesvar = filesvar.replace("//", "/")
1014
1015 origfiles = filesvar.split()
1016 files = []
1017 for file in origfiles:
1018 if os.path.isabs(file):
1019 file = '.' + file
1020 if not file.startswith("./"):
1021 file = './' + file
1022 globbed = glob.glob(file)
1023 if globbed:
1024 if [ file ] != globbed:
1025 files += globbed
1026 continue
1027 files.append(file)
1028
1029 for file in files:
1030 if not cpath.islink(file):
1031 if cpath.isdir(file):
1032 newfiles = [ os.path.join(file,x) for x in os.listdir(file) ]
1033 if newfiles:
1034 files += newfiles
1035 continue
1036 if (not cpath.islink(file)) and (not cpath.exists(file)):
1037 continue
1038 if file in seen:
1039 continue
1040 seen.append(file)
1041
1042 if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
1043 continue
1044
1045 def mkdir(src, dest, p):
1046 src = os.path.join(src, p)
1047 dest = os.path.join(dest, p)
1048 fstat = cpath.stat(src)
1049 os.mkdir(dest, fstat.st_mode)
1050 os.chown(dest, fstat.st_uid, fstat.st_gid)
1051 if p not in seen:
1052 seen.append(p)
1053 cpath.updatecache(dest)
1054
1055 def mkdir_recurse(src, dest, paths):
1056 if cpath.exists(dest + '/' + paths):
1057 return
1058 while paths.startswith("./"):
1059 paths = paths[2:]
1060 p = "."
1061 for c in paths.split("/"):
1062 p = os.path.join(p, c)
1063 if not cpath.exists(os.path.join(dest, p)):
1064 mkdir(src, dest, p)
1065
1066 if cpath.isdir(file) and not cpath.islink(file):
1067 mkdir_recurse(dvar, root, file)
1068 continue
1069
1070 mkdir_recurse(dvar, root, os.path.dirname(file))
1071 fpath = os.path.join(root,file)
1072 if not cpath.islink(file):
1073 os.link(file, fpath)
1074 fstat = cpath.stat(file)
1075 os.chmod(fpath, fstat.st_mode)
1076 os.chown(fpath, fstat.st_uid, fstat.st_gid)
1077 continue
1078 ret = bb.utils.copyfile(file, fpath)
1079 if ret is False or ret == 0:
1080 raise bb.build.FuncFailed("File population failed")
1081
1082 os.umask(oldumask)
1083 os.chdir(workdir)
1084
1085 unshipped = []
1086 for root, dirs, files in cpath.walk(dvar):
1087 dir = root[len(dvar):]
1088 if not dir:
1089 dir = os.sep
1090 for f in (files + dirs):
1091 path = os.path.join(dir, f)
1092 if ('.' + path) not in seen:
1093 unshipped.append(path)
1094
1095 if unshipped:
1096 msg = pn + ": Files/directories were installed but not shipped"
1097 if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
1098 bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
1099 else:
1100 for f in unshipped:
1101 msg = msg + "\n " + f
1102 package_qa_handle_error("installed-vs-shipped", msg, d)
1103}
1104populate_packages[dirs] = "${D}"
1105
1106python package_fixsymlinks () {
1107 import errno
1108 pkgdest = d.getVar('PKGDEST', True)
1109 packages = d.getVar("PACKAGES", True).split()
1110
1111 dangling_links = {}
1112 pkg_files = {}
1113 for pkg in packages:
1114 dangling_links[pkg] = []
1115 pkg_files[pkg] = []
1116 inst_root = os.path.join(pkgdest, pkg)
1117 for path in pkgfiles[pkg]:
1118 rpath = path[len(inst_root):]
1119 pkg_files[pkg].append(rpath)
1120 rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
1121 if not cpath.lexists(rtarget):
1122 dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
1123
1124 newrdepends = {}
1125 for pkg in dangling_links:
1126 for l in dangling_links[pkg]:
1127 found = False
1128 bb.debug(1, "%s contains dangling link %s" % (pkg, l))
1129 for p in packages:
1130 if l in pkg_files[p]:
1131 found = True
1132 bb.debug(1, "target found in %s" % p)
1133 if p == pkg:
1134 break
1135 if pkg not in newrdepends:
1136 newrdepends[pkg] = []
1137 newrdepends[pkg].append(p)
1138 break
1139 if not found:
1140 bb.note("%s contains dangling symlink to %s" % (pkg, l))
1141
1142 for pkg in newrdepends:
1143 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
1144 for p in newrdepends[pkg]:
1145 if p not in rdepends:
1146 rdepends[p] = []
1147 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1148}
1149
1150
1151python package_package_name_hook() {
1152 """
1153 A package_name_hook function can be used to rewrite the package names by
1154 changing PKG. For an example, see debian.bbclass.
1155 """
1156 pass
1157}
1158
1159EXPORT_FUNCTIONS package_name_hook
1160
1161
1162PKGDESTWORK = "${WORKDIR}/pkgdata"
1163
1164python emit_pkgdata() {
1165 from glob import glob
1166 import json
1167
1168 def write_if_exists(f, pkg, var):
1169 def encode(str):
1170 import codecs
1171 c = codecs.getencoder("string_escape")
1172 return c(str)[0]
1173
1174 val = d.getVar('%s_%s' % (var, pkg), True)
1175 if val:
1176 f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
1177 return val
1178 val = d.getVar('%s' % (var), True)
1179 if val:
1180 f.write('%s: %s\n' % (var, encode(val)))
1181 return val
1182
1183 def write_extra_pkgs(variants, pn, packages, pkgdatadir):
1184 for variant in variants:
1185 with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
1186 fd.write("PACKAGES: %s\n" % ' '.join(
1187 map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
1188
1189 def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
1190 for variant in variants:
1191 for pkg in packages.split():
1192 ml_pkg = "%s-%s" % (variant, pkg)
1193 subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
1194 with open(subdata_file, 'w') as fd:
1195 fd.write("PKG_%s: %s" % (ml_pkg, pkg))
1196
1197 packages = d.getVar('PACKAGES', True)
1198 pkgdest = d.getVar('PKGDEST', True)
1199 pkgdatadir = d.getVar('PKGDESTWORK', True)
1200
1201 # Take shared lock since we're only reading, not writing
1202 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
1203
1204 data_file = pkgdatadir + d.expand("/${PN}" )
1205 f = open(data_file, 'w')
1206 f.write("PACKAGES: %s\n" % packages)
1207 f.close()
1208
1209 pn = d.getVar('PN', True)
1210 global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS', True) or "").split()
1211 variants = (d.getVar('MULTILIB_VARIANTS', True) or "").split()
1212
1213 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1214 write_extra_pkgs(variants, pn, packages, pkgdatadir)
1215
1216 if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
1217 write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
1218
1219 workdir = d.getVar('WORKDIR', True)
1220
1221 for pkg in packages.split():
1222 pkgval = d.getVar('PKG_%s' % pkg, True)
1223 if pkgval is None:
1224 pkgval = pkg
1225 d.setVar('PKG_%s' % pkg, pkg)
1226
1227 pkgdestpkg = os.path.join(pkgdest, pkg)
1228 files = {}
1229 total_size = 0
1230 for f in pkgfiles[pkg]:
1231 relpth = os.path.relpath(f, pkgdestpkg)
1232 fstat = os.lstat(f)
1233 total_size += fstat.st_size
1234 files[os.sep + relpth] = fstat.st_size
1235 d.setVar('FILES_INFO', json.dumps(files))
1236
1237 subdata_file = pkgdatadir + "/runtime/%s" % pkg
1238 sf = open(subdata_file, 'w')
1239 write_if_exists(sf, pkg, 'PN')
1240 write_if_exists(sf, pkg, 'PE')
1241 write_if_exists(sf, pkg, 'PV')
1242 write_if_exists(sf, pkg, 'PR')
1243 write_if_exists(sf, pkg, 'PKGE')
1244 write_if_exists(sf, pkg, 'PKGV')
1245 write_if_exists(sf, pkg, 'PKGR')
1246 write_if_exists(sf, pkg, 'LICENSE')
1247 write_if_exists(sf, pkg, 'DESCRIPTION')
1248 write_if_exists(sf, pkg, 'SUMMARY')
1249 write_if_exists(sf, pkg, 'RDEPENDS')
1250 rprov = write_if_exists(sf, pkg, 'RPROVIDES')
1251 write_if_exists(sf, pkg, 'RRECOMMENDS')
1252 write_if_exists(sf, pkg, 'RSUGGESTS')
1253 write_if_exists(sf, pkg, 'RREPLACES')
1254 write_if_exists(sf, pkg, 'RCONFLICTS')
1255 write_if_exists(sf, pkg, 'SECTION')
1256 write_if_exists(sf, pkg, 'PKG')
1257 write_if_exists(sf, pkg, 'ALLOW_EMPTY')
1258 write_if_exists(sf, pkg, 'FILES')
1259 write_if_exists(sf, pkg, 'pkg_postinst')
1260 write_if_exists(sf, pkg, 'pkg_postrm')
1261 write_if_exists(sf, pkg, 'pkg_preinst')
1262 write_if_exists(sf, pkg, 'pkg_prerm')
1263 write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
1264 write_if_exists(sf, pkg, 'FILES_INFO')
1265 for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
1266 write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
1267
1268 write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
1269 for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
1270 write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
1271
1272 sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
1273 sf.close()
1274
1275 # Symlinks needed for rprovides lookup
1276 if rprov:
1277 for p in rprov.strip().split():
1278 subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
1279 bb.utils.mkdirhier(os.path.dirname(subdata_sym))
1280 oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
1281
1282 allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
1283 if not allow_empty:
1284 allow_empty = d.getVar('ALLOW_EMPTY', True)
1285 root = "%s/%s" % (pkgdest, pkg)
1286 os.chdir(root)
1287 g = glob('*')
1288 if g or allow_empty == "1":
1289 # Symlinks needed for reverse lookups (from the final package name)
1290 subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
1291 oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
1292
1293 packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
1294 open(packagedfile, 'w').close()
1295
1296 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1297 write_extra_runtime_pkgs(variants, packages, pkgdatadir)
1298
1299 if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d):
1300 write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
1301
1302 bb.utils.unlockfile(lf)
1303}
1304emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
1305
1306ldconfig_postinst_fragment() {
1307if [ x"$D" = "x" ]; then
1308 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
1309fi
1310}
1311
1312RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
1313
1314# Collect perfile run-time dependency metadata
1315# Output:
1316# FILERPROVIDESFLIST_pkg - list of all files w/ deps
1317# FILERPROVIDES_filepath_pkg - per file dep
1318#
1319# FILERDEPENDSFLIST_pkg - list of all files w/ deps
1320# FILERDEPENDS_filepath_pkg - per file dep
1321
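# e.g. for a binary /usr/bin/foo in package foo the result might look like
# (illustrative values produced by rpmdeps):
#   FILERDEPENDSFLIST_foo = "/usr/bin/foo"
#   FILERDEPENDS_/usr/bin/foo_foo = "libc.so.6 rtld(GNU_HASH)"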
1322python package_do_filedeps() {
1323 if d.getVar('SKIP_FILEDEPS', True) == '1':
1324 return
1325
1326 pkgdest = d.getVar('PKGDEST', True)
1327 packages = d.getVar('PACKAGES', True)
1328 rpmdeps = d.getVar('RPMDEPS', True)
1329
1330 def chunks(files, n):
1331 return [files[i:i+n] for i in range(0, len(files), n)]
1332
1333 pkglist = []
1334 for pkg in packages.split():
1335 if d.getVar('SKIP_FILEDEPS_' + pkg, True) == '1':
1336 continue
1337 if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
1338 continue
1339 for files in chunks(pkgfiles[pkg], 100):
1340 pkglist.append((pkg, files, rpmdeps, pkgdest))
1341
1342 processed = oe.utils.multiprocess_exec( pkglist, oe.package.filedeprunner)
1343
1344 provides_files = {}
1345 requires_files = {}
1346
1347 for result in processed:
1348 (pkg, provides, requires) = result
1349
1350 if pkg not in provides_files:
1351 provides_files[pkg] = []
1352 if pkg not in requires_files:
1353 requires_files[pkg] = []
1354
1355 for file in provides:
1356 provides_files[pkg].append(file)
1357 key = "FILERPROVIDES_" + file + "_" + pkg
1358 d.setVar(key, " ".join(provides[file]))
1359
1360 for file in requires:
1361 requires_files[pkg].append(file)
1362 key = "FILERDEPENDS_" + file + "_" + pkg
1363 d.setVar(key, " ".join(requires[file]))
1364
1365 for pkg in requires_files:
1366 d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
1367 for pkg in provides_files:
1368 d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
1369}
1370
1371SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2"
1372SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
1373
1374python package_do_shlibs() {
1375 import re, pipes
1376 import subprocess as sub
1377
1378 exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
1379 if exclude_shlibs:
1380 bb.note("not generating shlibs")
1381 return
1382
1383 lib_re = re.compile("^.*\.so")
1384 libdir_re = re.compile(".*/%s$" % d.getVar('baselib', True))
1385
1386 packages = d.getVar('PACKAGES', True)
1387 targetos = d.getVar('TARGET_OS', True)
1388
1389 workdir = d.getVar('WORKDIR', True)
1390
1391 ver = d.getVar('PKGV', True)
1392 if not ver:
1393 msg = "PKGV not defined"
1394 package_qa_handle_error("pkgv-undefined", msg, d)
1395 return
1396
1397 pkgdest = d.getVar('PKGDEST', True)
1398
1399 shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
1400 shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
1401
1402 # Take shared lock since we're only reading, not writing
1403 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
1404
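    # Each <dep_pkg>.list file under SHLIBSDIRS holds "soname:dir:version"
    # entries, one per line, e.g. (illustrative): libfoo.so.1:/usr/lib:1.2.3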
1405 def read_shlib_providers():
1406 list_re = re.compile('^(.*)\.list$')
1407 # Go from least to most specific since the last one found wins
1408 for dir in reversed(shlibs_dirs):
1409 bb.debug(2, "Reading shlib providers in %s" % (dir))
1410 if not os.path.exists(dir):
1411 continue
1412 for file in os.listdir(dir):
1413 m = list_re.match(file)
1414 if m:
1415 dep_pkg = m.group(1)
1416 fd = open(os.path.join(dir, file))
1417 lines = fd.readlines()
1418 fd.close()
1419 for l in lines:
1420 s = l.strip().split(":")
1421 if s[0] not in shlib_provider:
1422 shlib_provider[s[0]] = {}
1423 shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
1424
1425 def linux_so(file, needed, sonames, renames, pkgver):
1426 needs_ldconfig = False
1427 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1428 cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(file) + " 2>/dev/null"
1429 fd = os.popen(cmd)
1430 lines = fd.readlines()
1431 fd.close()
1432 rpath = []
1433 for l in lines:
1434 m = re.match("\s+RPATH\s+([^\s]*)", l)
1435 if m:
1436 rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
1437 rpath = map(os.path.normpath, rpaths)
1438 for l in lines:
1439 m = re.match("\s+NEEDED\s+([^\s]*)", l)
1440 if m:
1441 dep = m.group(1)
1442 if dep not in needed[pkg]:
1443 needed[pkg].append((dep, file, rpath))
1444 m = re.match("\s+SONAME\s+([^\s]*)", l)
1445 if m:
1446 this_soname = m.group(1)
1447 prov = (this_soname, ldir, pkgver)
1448 if prov not in sonames:
1449 # if library is private (only used by package) then do not build shlib for it
1450 if not private_libs or this_soname not in private_libs:
1451 sonames.append(prov)
1452 if libdir_re.match(os.path.dirname(file)):
1453 needs_ldconfig = True
1454 if snap_symlinks and (os.path.basename(file) != this_soname):
1455 renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
1456 return needs_ldconfig
1457
1458 def darwin_so(file, needed, sonames, renames, pkgver):
1459 if not os.path.exists(file):
1460 return
1461 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1462
1463 def get_combinations(base):
1464 #
1465 # Given a base library name, find all combinations of this split by "." and "-"
1466 #
1467 combos = []
1468 options = base.split(".")
1469 for i in range(1, len(options) + 1):
1470 combos.append(".".join(options[0:i]))
1471 options = base.split("-")
1472 for i in range(1, len(options) + 1):
1473 combos.append("-".join(options[0:i]))
1474 return combos
1475
1476 if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg'):
1477 # Drop suffix
1478 name = os.path.basename(file).rsplit(".",1)[0]
1479 # Find all combinations
1480 combos = get_combinations(name)
1481 for combo in combos:
1482 if combo not in sonames:
1483 prov = (combo, ldir, pkgver)
1484 sonames.append(prov)
1485 if file.endswith('.dylib') or file.endswith('.so'):
1486 rpath = []
1487 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
1488 out, err = p.communicate()
1489 # If otool returned successfully, process its stdout for results
1490 if p.returncode == 0:
1491 for l in out.split("\n"):
1492 l = l.strip()
1493 if l.startswith('path '):
1494 rpath.append(l.split()[1])
1495
1496 p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
1497 err, out = p.communicate()
1498             # If otool exited successfully, parse the output ('err' actually holds stdout here, since communicate() returns (stdout, stderr))
1499 if p.returncode == 0:
1500 for l in err.split("\n"):
1501 l = l.strip()
1502 if not l or l.endswith(":"):
1503 continue
1504 if "is not an object file" in l:
1505 continue
1506 name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
1507 if name and name not in needed[pkg]:
1508 needed[pkg].append((name, file, []))
1509
1510 if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
1511 snap_symlinks = True
1512 else:
1513 snap_symlinks = False
1514
1515 if (d.getVar('USE_LDCONFIG', True) or "1") == "1":
1516 use_ldconfig = True
1517 else:
1518 use_ldconfig = False
1519
1520 needed = {}
1521 shlib_provider = {}
1522 read_shlib_providers()
1523
1524 for pkg in packages.split():
1525 private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
1526 private_libs = private_libs.split()
1527 needs_ldconfig = False
1528 bb.debug(2, "calculating shlib provides for %s" % pkg)
1529
1530 pkgver = d.getVar('PKGV_' + pkg, True)
1531 if not pkgver:
1532 pkgver = d.getVar('PV_' + pkg, True)
1533 if not pkgver:
1534 pkgver = ver
1535
1536 needed[pkg] = []
1537 sonames = list()
1538 renames = list()
1539 for file in pkgfiles[pkg]:
1540 soname = None
1541 if cpath.islink(file):
1542 continue
1543 if targetos == "darwin" or targetos == "darwin8":
1544 darwin_so(file, needed, sonames, renames, pkgver)
1545 elif os.access(file, os.X_OK) or lib_re.match(file):
1546 ldconfig = linux_so(file, needed, sonames, renames, pkgver)
1547 needs_ldconfig = needs_ldconfig or ldconfig
1548 for (old, new) in renames:
1549 bb.note("Renaming %s to %s" % (old, new))
1550 os.rename(old, new)
1551 pkgfiles[pkg].remove(old)
1552
1553 shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
1554 if len(sonames):
1555 fd = open(shlibs_file, 'w')
1556 for s in sonames:
1557 if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
1558 (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
1559 if old_pkg != pkg:
1560 bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
1561 bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
1562 fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
1563 if s[0] not in shlib_provider:
1564 shlib_provider[s[0]] = {}
1565 shlib_provider[s[0]][s[1]] = (pkg, pkgver)
1566 fd.close()
1567 if needs_ldconfig and use_ldconfig:
1568 bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
1569 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
1570 if not postinst:
1571 postinst = '#!/bin/sh\n'
1572 postinst += d.getVar('ldconfig_postinst_fragment', True)
1573 d.setVar('pkg_postinst_%s' % pkg, postinst)
1574 bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
1575
1576 bb.utils.unlockfile(lf)
1577
1578 assumed_libs = d.getVar('ASSUME_SHLIBS', True)
1579 if assumed_libs:
1580 libdir = d.getVar("libdir", True)
1581 for e in assumed_libs.split():
1582 l, dep_pkg = e.split(":")
1583 lib_ver = None
1584 dep_pkg = dep_pkg.rsplit("_", 1)
1585 if len(dep_pkg) == 2:
1586 lib_ver = dep_pkg[1]
1587 dep_pkg = dep_pkg[0]
1588 if l not in shlib_provider:
1589 shlib_provider[l] = {}
1590 shlib_provider[l][libdir] = (dep_pkg, lib_ver)
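    # e.g. ASSUME_SHLIBS = "libEGL.so.1:libegl-implementation_1.0" records that
    # libegl-implementation (version 1.0) provides libEGL.so.1 from ${libdir}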
1591
1592 libsearchpath = [d.getVar('libdir', True), d.getVar('base_libdir', True)]
1593
1594 for pkg in packages.split():
1595 bb.debug(2, "calculating shlib requirements for %s" % pkg)
1596
         # The provider loop above leaves private_libs set for whichever package
         # it processed last; recompute it for the package being resolved here
         private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True) or ""
         private_libs = private_libs.split()
1597         deps = list()
1598 for n in needed[pkg]:
1599             # if n is in private libraries, don't try to search a provider for it
1600             # this could cause a problem if some abc.bb provides the private
1601             # /opt/abc/lib/libfoo.so.1 and also ships /usr/bin/abc, which depends on the system libfoo.so.1,
1602             # but skipping it is still a better alternative than providing our own
1603             # version and then adding a runtime dependency on the same system library
1604 if private_libs and n[0] in private_libs:
1605 bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
1606 continue
1607 if n[0] in shlib_provider.keys():
1608 shlib_provider_path = list()
1609 for k in shlib_provider[n[0]].keys():
1610 shlib_provider_path.append(k)
1611 match = None
1612 for p in n[2] + shlib_provider_path + libsearchpath:
1613 if p in shlib_provider[n[0]]:
1614 match = p
1615 break
1616 if match:
1617 (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
1618
1619 bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
1620
1621 if dep_pkg == pkg:
1622 continue
1623
1624 if ver_needed:
1625 dep = "%s (>= %s)" % (dep_pkg, ver_needed)
1626 else:
1627 dep = dep_pkg
1628 if not dep in deps:
1629 deps.append(dep)
1630 continue
1631 bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
1632
1633 deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
1634 if os.path.exists(deps_file):
1635 os.remove(deps_file)
1636 if len(deps):
1637 fd = open(deps_file, 'w')
1638 for dep in deps:
1639 fd.write(dep + '\n')
1640 fd.close()
1641}
1642
1643python package_do_pkgconfig () {
1644 import re
1645
1646 packages = d.getVar('PACKAGES', True)
1647 workdir = d.getVar('WORKDIR', True)
1648 pkgdest = d.getVar('PKGDEST', True)
1649
1650 shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
1651 shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
1652
1653 pc_re = re.compile('(.*)\.pc$')
1654 var_re = re.compile('(.*)=(.*)')
1655 field_re = re.compile('(.*): (.*)')
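    # A .pc file mixes variable assignments and keyword fields, e.g.:
    #   prefix=/usr
    #   Requires: glib-2.0 >= 2.40
    # var_re matches the assignments, field_re the fields; only the
    # 'Requires' field feeds pkgconfig_needed below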
1656
1657 pkgconfig_provided = {}
1658 pkgconfig_needed = {}
1659 for pkg in packages.split():
1660 pkgconfig_provided[pkg] = []
1661 pkgconfig_needed[pkg] = []
1662 for file in pkgfiles[pkg]:
1663 m = pc_re.match(file)
1664 if m:
1665 pd = bb.data.init()
1666 name = m.group(1)
1667 pkgconfig_provided[pkg].append(name)
1668 if not os.access(file, os.R_OK):
1669 continue
1670 f = open(file, 'r')
1671 lines = f.readlines()
1672 f.close()
1673 for l in lines:
1674 m = var_re.match(l)
1675 if m:
1676 name = m.group(1)
1677 val = m.group(2)
1678 pd.setVar(name, pd.expand(val))
1679 continue
1680 m = field_re.match(l)
1681 if m:
1682 hdr = m.group(1)
1683 exp = bb.data.expand(m.group(2), pd)
1684 if hdr == 'Requires':
1685 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
1686
1687     # Take the package lock; we read the shared pkgconfig state and also write this recipe's .pclist files below
1688 lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
1689
1690 for pkg in packages.split():
1691 pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
1692 if pkgconfig_provided[pkg] != []:
1693 f = open(pkgs_file, 'w')
1694 for p in pkgconfig_provided[pkg]:
1695 f.write('%s\n' % p)
1696 f.close()
1697
1698 # Go from least to most specific since the last one found wins
1699 for dir in reversed(shlibs_dirs):
1700 if not os.path.exists(dir):
1701 continue
1702 for file in os.listdir(dir):
1703 m = re.match('^(.*)\.pclist$', file)
1704 if m:
1705 pkg = m.group(1)
1706 fd = open(os.path.join(dir, file))
1707 lines = fd.readlines()
1708 fd.close()
1709 pkgconfig_provided[pkg] = []
1710 for l in lines:
1711 pkgconfig_provided[pkg].append(l.rstrip())
1712
1713 for pkg in packages.split():
1714 deps = []
1715 for n in pkgconfig_needed[pkg]:
1716 found = False
1717 for k in pkgconfig_provided.keys():
1718 if n in pkgconfig_provided[k]:
1719 if k != pkg and not (k in deps):
1720 deps.append(k)
1721 found = True
1722             if not found:
1723 bb.note("couldn't find pkgconfig module '%s' in any package" % n)
1724 deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
1725 if len(deps):
1726 fd = open(deps_file, 'w')
1727 for dep in deps:
1728 fd.write(dep + '\n')
1729 fd.close()
1730
1731 bb.utils.unlockfile(lf)
1732}
1733
1734def read_libdep_files(d):
1735 pkglibdeps = {}
1736 packages = d.getVar('PACKAGES', True).split()
1737 for pkg in packages:
1738 pkglibdeps[pkg] = {}
1739 for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
1740 depsfile = d.expand("${PKGDEST}/" + pkg + extension)
1741 if os.access(depsfile, os.R_OK):
1742 fd = open(depsfile)
1743 lines = fd.readlines()
1744 fd.close()
1745 for l in lines:
1746             l = l.rstrip()
1747 deps = bb.utils.explode_dep_versions2(l)
1748 for dep in deps:
1749 if not dep in pkglibdeps[pkg]:
1750 pkglibdeps[pkg][dep] = deps[dep]
1751 return pkglibdeps
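# Each .shlibdeps/.pcdeps/.clilibdeps file lists one runtime dependency per
# line, e.g. "libc6 (>= 2.21)", as written out by the functions above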
1752
1753python read_shlibdeps () {
1754 pkglibdeps = read_libdep_files(d)
1755
1756 packages = d.getVar('PACKAGES', True).split()
1757 for pkg in packages:
1758 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg, True) or "")
1759 for dep in pkglibdeps[pkg]:
1760 # Add the dep if it's not already there, or if no comparison is set
1761 if dep not in rdepends:
1762 rdepends[dep] = []
1763 for v in pkglibdeps[pkg][dep]:
1764 if v not in rdepends[dep]:
1765 rdepends[dep].append(v)
1766 d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1767}
1768
1769python package_depchains() {
1770 """
1771 For a given set of prefix and postfix modifiers, make those packages
1772 RRECOMMENDS on the corresponding packages for its RDEPENDS.
1773
1774 Example: If package A depends upon package B, and A's .bb emits an
1775 A-dev package, this would make A-dev Recommends: B-dev.
1776
1777 If only one of a given suffix is specified, it will take the RRECOMMENDS
1778 based on the RDEPENDS of *all* other packages. If more than one of a given
1779     suffix is specified, it will only use the RDEPENDS of the single parent
1780 package.
1781 """
1782
1783 packages = d.getVar('PACKAGES', True)
1784 postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
1785 prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
1786
1787 def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
1788
1789 #bb.note('depends for %s is %s' % (base, depends))
1790 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
1791
1792 for depend in depends:
1793 if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
1794 #bb.note("Skipping %s" % depend)
1795 continue
1796 if depend.endswith('-dev'):
1797 depend = depend[:-4]
1798 if depend.endswith('-dbg'):
1799 depend = depend[:-4]
1800 pkgname = getname(depend, suffix)
1801 #bb.note("Adding %s for %s" % (pkgname, depend))
1802 if pkgname not in rreclist and pkgname != pkg:
1803 rreclist[pkgname] = []
1804
1805 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
1806 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
1807
1808 def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
1809
1810 #bb.note('rdepends for %s is %s' % (base, rdepends))
1811 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg, True) or "")
1812
1813 for depend in rdepends:
1814 if depend.find('virtual-locale-') != -1:
1815 #bb.note("Skipping %s" % depend)
1816 continue
1817 if depend.endswith('-dev'):
1818 depend = depend[:-4]
1819 if depend.endswith('-dbg'):
1820 depend = depend[:-4]
1821 pkgname = getname(depend, suffix)
1822 #bb.note("Adding %s for %s" % (pkgname, depend))
1823 if pkgname not in rreclist and pkgname != pkg:
1824 rreclist[pkgname] = []
1825
1826 #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
1827 d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
1828
1829 def add_dep(list, dep):
1830 if dep not in list:
1831 list.append(dep)
1832
1833 depends = []
1834 for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
1835 add_dep(depends, dep)
1836
1837 rdepends = []
1838 for pkg in packages.split():
1839 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
1840 add_dep(rdepends, dep)
1841
1842 #bb.note('rdepends is %s' % rdepends)
1843
1844 def post_getname(name, suffix):
1845 return '%s%s' % (name, suffix)
1846 def pre_getname(name, suffix):
1847 return '%s%s' % (suffix, name)
1848
1849 pkgs = {}
1850 for pkg in packages.split():
1851 for postfix in postfixes:
1852 if pkg.endswith(postfix):
1853 if not postfix in pkgs:
1854 pkgs[postfix] = {}
1855 pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
1856
1857 for prefix in prefixes:
1858 if pkg.startswith(prefix):
1859 if not prefix in pkgs:
1860 pkgs[prefix] = {}
1861 pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
1862
1863 if "-dbg" in pkgs:
1864 pkglibdeps = read_libdep_files(d)
1865 pkglibdeplist = []
1866 for pkg in pkglibdeps:
1867 for k in pkglibdeps[pkg]:
1868 add_dep(pkglibdeplist, k)
1869 # FIXME this should not look at PN once all task recipes inherit from task.bbclass
1870 dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (d.getVar('PN', True) or '').startswith('packagegroup-'))
1871
1872 for suffix in pkgs:
1873 for pkg in pkgs[suffix]:
1874 if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
1875 continue
1876 (base, func) = pkgs[suffix][pkg]
1877 if suffix == "-dev":
1878 pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
1879 elif suffix == "-dbg":
1880 if not dbgdefaultdeps:
1881 pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
1882 continue
1883 if len(pkgs[suffix]) == 1:
1884 pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
1885 else:
1886 rdeps = []
1887 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or ""):
1888 add_dep(rdeps, dep)
1889 pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
1890}
1891
1892# Since bitbake can't determine which variables are accessed during package
1893# iteration, we need to list them here:
1894PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES"
1895
1896def gen_packagevar(d):
1897 ret = []
1898 pkgs = (d.getVar("PACKAGES", True) or "").split()
1899 vars = (d.getVar("PACKAGEVARS", True) or "").split()
1900 for p in pkgs:
1901 for v in vars:
1902 ret.append(v + "_" + p)
1903
1904     # Ensure that changes to INCOMPATIBLE_LICENSE cause do_package to be re-run
1905     # for affected recipes.
1906 ret.append('LICENSE_EXCLUSION-%s' % p)
1907 return " ".join(ret)
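# For example, with PACKAGES = "foo foo-dev" this returns entries such as
# "FILES_foo RDEPENDS_foo ... LICENSE_EXCLUSION-foo FILES_foo-dev ...",
# which do_package[vardeps] below pulls in via ${@gen_packagevar(d)}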
1908
1909PACKAGE_PREPROCESS_FUNCS ?= ""
1910# Functions for setting up PKGD
1911PACKAGEBUILDPKGD ?= " \
1912 perform_packagecopy \
1913 ${PACKAGE_PREPROCESS_FUNCS} \
1914 split_and_strip_files \
1915 fixup_perms \
1916 "
1917# Functions which split PKGD up into separate packages
1918PACKAGESPLITFUNCS ?= " \
1919 package_do_split_locales \
1920 populate_packages"
1921# Functions which process metadata based on split packages
1922PACKAGEFUNCS += " \
1923 package_fixsymlinks \
1924 package_name_hook \
1925 package_do_filedeps \
1926 package_do_shlibs \
1927 package_do_pkgconfig \
1928 read_shlibdeps \
1929 package_depchains \
1930 emit_pkgdata"
1931
1932python do_package () {
1933 # Change the following version to cause sstate to invalidate the package
1934     # cache. This is useful if an item this class depends on changes in a
1935     # way that alters the output of this class. rpmdeps is a good example,
1936     # as any change to rpmdeps requires this to be rerun.
1937 # PACKAGE_BBCLASS_VERSION = "1"
1938
1939 # Init cachedpath
1940 global cpath
1941 cpath = oe.cachedpath.CachedPath()
1942
1943 ###########################################################################
1944 # Sanity test the setup
1945 ###########################################################################
1946
1947 packages = (d.getVar('PACKAGES', True) or "").split()
1948 if len(packages) < 1:
1949 bb.debug(1, "No packages to build, skipping do_package")
1950 return
1951
1952 workdir = d.getVar('WORKDIR', True)
1953 outdir = d.getVar('DEPLOY_DIR', True)
1954 dest = d.getVar('D', True)
1955 dvar = d.getVar('PKGD', True)
1956 pn = d.getVar('PN', True)
1957
1958 if not workdir or not outdir or not dest or not dvar or not pn:
1959 msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
1960 package_qa_handle_error("var-undefined", msg, d)
1961 return
1962
1963 bb.build.exec_func("package_get_auto_pr", d)
1964
1965 ###########################################################################
1966 # Optimisations
1967 ###########################################################################
1968
1969 # Continually expanding complex expressions is inefficient, particularly
1970 # when we write to the datastore and invalidate the expansion cache. This
1971 # code pre-expands some frequently used variables
1972
1973 def expandVar(x, d):
1974 d.setVar(x, d.getVar(x, True))
1975
1976 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
1977 expandVar(x, d)
1978
1979 ###########################################################################
1980 # Setup PKGD (from D)
1981 ###########################################################################
1982
1983 for f in (d.getVar('PACKAGEBUILDPKGD', True) or '').split():
1984 bb.build.exec_func(f, d)
1985
1986 ###########################################################################
1987 # Split up PKGD into PKGDEST
1988 ###########################################################################
1989
1990 cpath = oe.cachedpath.CachedPath()
1991
1992 for f in (d.getVar('PACKAGESPLITFUNCS', True) or '').split():
1993 bb.build.exec_func(f, d)
1994
1995 ###########################################################################
1996 # Process PKGDEST
1997 ###########################################################################
1998
1999 # Build global list of files in each split package
2000 global pkgfiles
2001 pkgfiles = {}
2002 packages = d.getVar('PACKAGES', True).split()
2003 pkgdest = d.getVar('PKGDEST', True)
2004 for pkg in packages:
2005 pkgfiles[pkg] = []
2006 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
2007 for file in files:
2008 pkgfiles[pkg].append(walkroot + os.sep + file)
2009
2010 for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
2011 bb.build.exec_func(f, d)
2012}
2013
2014do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
2015do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
2016addtask package after do_install
2017
2018PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
2019SSTATETASKS += "do_package"
2020do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
2021do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
2022do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
2023do_package_setscene[dirs] = "${STAGING_DIR}"
2024
2025python do_package_setscene () {
2026 sstate_setscene(d)
2027}
2028addtask do_package_setscene
2029
2030do_packagedata () {
2031 :
2032}
2033
2034addtask packagedata before do_build after do_package
2035
2036SSTATETASKS += "do_packagedata"
2037do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
2038do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
2039do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
2040do_packagedata[stamp-extra-info] = "${MACHINE}"
2041
2042python do_packagedata_setscene () {
2043 sstate_setscene(d)
2044}
2045addtask do_packagedata_setscene
2046
2047#
2048# Helper functions for the package writing classes
2049#
2050
2051def mapping_rename_hook(d):
2052 """
2053 Rewrite variables to account for package renaming in things
2054 like debian.bbclass or manual PKG variable name changes
2055 """
2056 pkg = d.getVar("PKG", True)
2057 runtime_mapping_rename("RDEPENDS", pkg, d)
2058 runtime_mapping_rename("RRECOMMENDS", pkg, d)
2059 runtime_mapping_rename("RSUGGESTS", pkg, d)
2060
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
new file mode 100644
index 0000000000..5b5f7e2c9a
--- /dev/null
+++ b/meta/classes/package_deb.bbclass
@@ -0,0 +1,330 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4
5inherit package
6
7IMAGE_PKGTYPE ?= "deb"
8
9DPKG_ARCH ?= "${TARGET_ARCH}"
10
11PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
12
13APTCONF_TARGET = "${WORKDIR}"
14
15APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
16
17#
18# install a bunch of packages using apt
19# the following shell variables needs to be set before calling this func:
20# INSTALL_ROOTFS_DEB - install root dir
21# INSTALL_BASEARCH_DEB - install base architecutre
22# INSTALL_ARCHS_DEB - list of available archs
23# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
24# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attemped to be installed only
25# INSTALL_PACKAGES_LINGUAS_DEB - additional packages for uclibc
26# INSTALL_TASK_DEB - task name
27
28python do_package_deb () {
29 import re, copy
30 import textwrap
31 import subprocess
32
33 workdir = d.getVar('WORKDIR', True)
34 if not workdir:
35 bb.error("WORKDIR not defined, unable to package")
36 return
37
38 outdir = d.getVar('PKGWRITEDIRDEB', True)
39 if not outdir:
40 bb.error("PKGWRITEDIRDEB not defined, unable to package")
41 return
42
43 packages = d.getVar('PACKAGES', True)
44 if not packages:
45 bb.debug(1, "PACKAGES not defined, nothing to package")
46 return
47
48 tmpdir = d.getVar('TMPDIR', True)
49
50 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
51 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
52
53 if packages == []:
54 bb.debug(1, "No packages; nothing to do")
55 return
56
57 pkgdest = d.getVar('PKGDEST', True)
58
59 def cleanupcontrol(root):
60 for p in ['CONTROL', 'DEBIAN']:
61 p = os.path.join(root, p)
62 if os.path.exists(p):
63 bb.utils.prunedir(p)
64
65 for pkg in packages.split():
66 localdata = bb.data.createCopy(d)
67 root = "%s/%s" % (pkgdest, pkg)
68
69 lf = bb.utils.lockfile(root + ".lock")
70
71 localdata.setVar('ROOT', '')
72 localdata.setVar('ROOT_%s' % pkg, root)
73 pkgname = localdata.getVar('PKG_%s' % pkg, True)
74 if not pkgname:
75 pkgname = pkg
76 localdata.setVar('PKG', pkgname)
77
78 localdata.setVar('OVERRIDES', pkg)
79
80 bb.data.update_data(localdata)
81         basedir = os.path.dirname(root)
82
83 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
84 bb.utils.mkdirhier(pkgoutdir)
85
86 os.chdir(root)
87 cleanupcontrol(root)
88 from glob import glob
89 g = glob('*')
90         if not g and localdata.getVar('ALLOW_EMPTY', True) != "1":
91 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
92 bb.utils.unlockfile(lf)
93 continue
94
95 controldir = os.path.join(root, 'DEBIAN')
96 bb.utils.mkdirhier(controldir)
97 os.chmod(controldir, 0755)
98 try:
99 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
100 # import codecs
101 # ctrlfile = codecs.open("someFile", "w", "utf-8")
102         except (OSError, IOError):
103 bb.utils.unlockfile(lf)
104 raise bb.build.FuncFailed("unable to open control file for writing.")
105
106 fields = []
107 pe = d.getVar('PKGE', True)
108 if pe and int(pe) > 0:
109 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
110 else:
111 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
112 fields.append(["Description: %s\n", ['DESCRIPTION']])
113 fields.append(["Section: %s\n", ['SECTION']])
114 fields.append(["Priority: %s\n", ['PRIORITY']])
115 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
116 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
117 fields.append(["OE: %s\n", ['PN']])
118 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
119 if d.getVar('HOMEPAGE', True):
120 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
121
122 # Package, Version, Maintainer, Description - mandatory
123 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
124
125
126 def pullData(l, d):
127 l2 = []
128 for i in l:
129 data = d.getVar(i, True)
130 if data is None:
131                 raise KeyError(i)
132 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
133 data = 'all'
134 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
135                 # The fields in a deb control file don't allow the `_'
136                 # character, so map `_' to `-' in the arch name
137                 # (e.g. `x86_64' becomes `x86-64')
138 data = data.replace('_', '-')
139 l2.append(data)
140 return l2
141
142 ctrlfile.write("Package: %s\n" % pkgname)
143 # check for required fields
144 try:
145 for (c, fs) in fields:
146 for f in fs:
147 if localdata.getVar(f) is None:
148 raise KeyError(f)
149 # Special behavior for description...
150 if 'DESCRIPTION' in fs:
151 summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
152 ctrlfile.write('Description: %s\n' % unicode(summary))
153 description = localdata.getVar('DESCRIPTION', True) or "."
154 description = textwrap.dedent(description).strip()
155 if '\\n' in description:
156 # Manually indent
157 for t in description.split('\\n'):
158                         # We don't limit the width when manually indenting, but we do
159                         # need textwrap.fill() to set the initial_indent and
160                         # subsequent_indent, so use a large width
161 ctrlfile.write('%s\n' % unicode(textwrap.fill(t, width=100000, initial_indent=' ', subsequent_indent=' ')))
162 else:
163 # Auto indent
164 ctrlfile.write('%s\n' % unicode(textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' ')))
165
166 else:
167 ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
168 except KeyError:
169 import sys
170 (type, value, traceback) = sys.exc_info()
171 bb.utils.unlockfile(lf)
172 ctrlfile.close()
173 raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
174 # more fields
175
176 custom_fields_chunk = get_package_additional_metadata("deb", localdata)
177 if custom_fields_chunk is not None:
178 ctrlfile.write(unicode(custom_fields_chunk))
179 ctrlfile.write("\n")
180
181 mapping_rename_hook(localdata)
182
183 def debian_cmp_remap(var):
184             # dpkg does not allow '(' or ')' in a dependency name;
185             # replace both with '__'
186             #
187             # In debian, a bare '>' and '<' do not mean what they appear to mean:
188             # '<' = less than or equal
189             # '>' = greater than or equal
190             # adjust these to the strict '<<' and '>>' equivalents
191 #
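            # e.g. the dependency name "libfoo(x86-64)" becomes "libfoo__x86-64__",
            # and a version constraint "> 1.2" becomes ">> 1.2" below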
192 for dep in var:
193 if '(' in dep:
194 newdep = dep.replace('(', '__')
195 newdep = newdep.replace(')', '__')
196 if newdep != dep:
197 var[newdep] = var[dep]
198 del var[dep]
199 for dep in var:
200 for i, v in enumerate(var[dep]):
201 if (v or "").startswith("< "):
202 var[dep][i] = var[dep][i].replace("< ", "<< ")
203 elif (v or "").startswith("> "):
204 var[dep][i] = var[dep][i].replace("> ", ">> ")
205
206 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
207 debian_cmp_remap(rdepends)
208 for dep in rdepends:
209 if '*' in dep:
210 del rdepends[dep]
211 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
212 debian_cmp_remap(rrecommends)
213 for dep in rrecommends:
214 if '*' in dep:
215 del rrecommends[dep]
216 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
217 debian_cmp_remap(rsuggests)
218 rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "")
219 debian_cmp_remap(rprovides)
220 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
221 debian_cmp_remap(rreplaces)
222 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
223 debian_cmp_remap(rconflicts)
224 if rdepends:
225 ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
226 if rsuggests:
227 ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
228 if rrecommends:
229 ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
230 if rprovides:
231 ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
232 if rreplaces:
233 ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
234 if rconflicts:
235 ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
236 ctrlfile.close()
237
238 for script in ["preinst", "postinst", "prerm", "postrm"]:
239 scriptvar = localdata.getVar('pkg_%s' % script, True)
240 if not scriptvar:
241 continue
242 scriptvar = scriptvar.strip()
243 try:
244 scriptfile = open(os.path.join(controldir, script), 'w')
245             except (OSError, IOError):
246 bb.utils.unlockfile(lf)
247 raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
248
249 if scriptvar.startswith("#!"):
250 pos = scriptvar.find("\n") + 1
251 scriptfile.write(scriptvar[:pos])
252 else:
253 pos = 0
254 scriptfile.write("#!/bin/sh\n")
255
256 # Prevent the prerm/postrm scripts from being run during an upgrade
257 if script in ('prerm', 'postrm'):
258 scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
259
260 scriptfile.write(scriptvar[pos:])
261 scriptfile.write('\n')
262 scriptfile.close()
263 os.chmod(os.path.join(controldir, script), 0755)
264
265 conffiles_str = localdata.getVar("CONFFILES", True)
266 if conffiles_str:
267 try:
268 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
269             except (OSError, IOError):
270 bb.utils.unlockfile(lf)
271 raise bb.build.FuncFailed("unable to open conffiles for writing.")
272 for f in conffiles_str.split():
273 if os.path.exists(oe.path.join(root, f)):
274 conffiles.write('%s\n' % f)
275 conffiles.close()
276
277 os.chdir(basedir)
278 ret = subprocess.call("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir), shell=True)
279 if ret != 0:
280 bb.utils.unlockfile(lf)
281 raise bb.build.FuncFailed("dpkg-deb execution failed")
282
283 cleanupcontrol(root)
284 bb.utils.unlockfile(lf)
285}
286
287SSTATETASKS += "do_package_write_deb"
288do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
289do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
290
291python do_package_write_deb_setscene () {
292 tmpdir = d.getVar('TMPDIR', True)
293
294 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
295 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
296
297 sstate_setscene(d)
298}
299addtask do_package_write_deb_setscene
300
301python () {
302 if d.getVar('PACKAGES', True) != '':
303 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
304 d.appendVarFlag('do_package_write_deb', 'depends', deps)
305 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
306
307 # Map TARGET_ARCH to Debian's ideas about architectures
308 darch = d.getVar('DPKG_ARCH', True)
309 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
310 d.setVar('DPKG_ARCH', 'i386')
311 elif darch == "x86_64":
312 d.setVar('DPKG_ARCH', 'amd64')
313 elif darch == "arm":
314 d.setVar('DPKG_ARCH', 'armel')
315}
316
317python do_package_write_deb () {
318 bb.build.exec_func("read_subpackage_metadata", d)
319 bb.build.exec_func("do_package_deb", d)
320}
321do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
322do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
323do_package_write_deb[umask] = "022"
324addtask package_write_deb after do_packagedata do_package
325
326
327PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
328PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
329
330do_build[recrdeptask] += "do_package_write_deb"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
new file mode 100644
index 0000000000..44fd3eb29c
--- /dev/null
+++ b/meta/classes/package_ipk.bbclass
@@ -0,0 +1,286 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "ipk"
4
5IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
6IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
7
8PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
9
10# Program to be used to build opkg packages
11OPKGBUILDCMD ??= "opkg-build"
12
13OPKG_ARGS = "--force_postinstall --prefer-arch-to-version"
14OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
15OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
16
17OPKGLIBDIR = "${localstatedir}/lib"
18
19python do_package_ipk () {
20 import re, copy
21 import textwrap
22 import subprocess
23
24 workdir = d.getVar('WORKDIR', True)
25 outdir = d.getVar('PKGWRITEDIRIPK', True)
26 tmpdir = d.getVar('TMPDIR', True)
27 pkgdest = d.getVar('PKGDEST', True)
28 if not workdir or not outdir or not tmpdir:
29 bb.error("Variables incorrectly set, unable to package")
30 return
31
32 packages = d.getVar('PACKAGES', True)
33 if not packages or packages == '':
34 bb.debug(1, "No packages; nothing to do")
35 return
36
37     # We're about to add new packages, so the index needs to be regenerated;
38     # remove the appropriate stamp file.
39 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
40 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
41
42 def cleanupcontrol(root):
43 for p in ['CONTROL', 'DEBIAN']:
44 p = os.path.join(root, p)
45 if os.path.exists(p):
46 bb.utils.prunedir(p)
47
48 for pkg in packages.split():
49 localdata = bb.data.createCopy(d)
50 root = "%s/%s" % (pkgdest, pkg)
51
52 lf = bb.utils.lockfile(root + ".lock")
53
54 localdata.setVar('ROOT', '')
55 localdata.setVar('ROOT_%s' % pkg, root)
56 pkgname = localdata.getVar('PKG_%s' % pkg, True)
57 if not pkgname:
58 pkgname = pkg
59 localdata.setVar('PKG', pkgname)
60
61 localdata.setVar('OVERRIDES', pkg)
62
63 bb.data.update_data(localdata)
64         basedir = os.path.dirname(root)
65 arch = localdata.getVar('PACKAGE_ARCH', True)
66
67         if localdata.getVar('IPK_HIERARCHICAL_FEED', True) == "1":
68 # Spread packages across subdirectories so each isn't too crowded
69 if pkgname.startswith('lib'):
70 pkg_prefix = 'lib' + pkgname[3]
71 else:
72 pkg_prefix = pkgname[0]
73
74 # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
75 # together. These package suffixes are taken from the definitions of
76 # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
77 if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
78 pkg_subdir = pkgname[:-4]
79 elif pkgname.endswith('-staticdev'):
80 pkg_subdir = pkgname[:-10]
81 elif pkgname.endswith('-locale'):
82 pkg_subdir = pkgname[:-7]
83 elif '-locale-' in pkgname:
84 pkg_subdir = pkgname[:pkgname.find('-locale-')]
85 else:
86 pkg_subdir = pkgname
87
88 pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
89 else:
90 pkgoutdir = "%s/%s" % (outdir, arch)
91
92 bb.utils.mkdirhier(pkgoutdir)
93 os.chdir(root)
94 cleanupcontrol(root)
95 from glob import glob
96 g = glob('*')
97         if not g and localdata.getVar('ALLOW_EMPTY', True) != "1":
98 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
99 bb.utils.unlockfile(lf)
100 continue
101
102 controldir = os.path.join(root, 'CONTROL')
103 bb.utils.mkdirhier(controldir)
104 try:
105 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
106         except (OSError, IOError):
107 bb.utils.unlockfile(lf)
108 raise bb.build.FuncFailed("unable to open control file for writing.")
109
110 fields = []
111 pe = d.getVar('PKGE', True)
112 if pe and int(pe) > 0:
113 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
114 else:
115 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
116 fields.append(["Description: %s\n", ['DESCRIPTION']])
117 fields.append(["Section: %s\n", ['SECTION']])
118 fields.append(["Priority: %s\n", ['PRIORITY']])
119 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
120 fields.append(["License: %s\n", ['LICENSE']])
121 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
122 fields.append(["OE: %s\n", ['PN']])
123 if d.getVar('HOMEPAGE', True):
124 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
125
126 def pullData(l, d):
127 l2 = []
128 for i in l:
129 l2.append(d.getVar(i, True))
130 return l2
131
132 ctrlfile.write("Package: %s\n" % pkgname)
133 # check for required fields
134 try:
135 for (c, fs) in fields:
136 for f in fs:
137 if localdata.getVar(f) is None:
138 raise KeyError(f)
139 # Special behavior for description...
140 if 'DESCRIPTION' in fs:
141 summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
142 ctrlfile.write('Description: %s\n' % summary)
143 description = localdata.getVar('DESCRIPTION', True) or "."
144 description = textwrap.dedent(description).strip()
145 if '\\n' in description:
146 # Manually indent
147 for t in description.split('\\n'):
148                         # We don't limit the width when manually indenting, but we do
149                         # need textwrap.fill() to set the initial_indent and
150                         # subsequent_indent, so use a large width
151 ctrlfile.write('%s\n' % textwrap.fill(t.strip(), width=100000, initial_indent=' ', subsequent_indent=' '))
152 else:
153 # Auto indent
154 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
155 else:
156 ctrlfile.write(c % tuple(pullData(fs, localdata)))
157 except KeyError:
158 import sys
159 (type, value, traceback) = sys.exc_info()
160 ctrlfile.close()
161 bb.utils.unlockfile(lf)
162 raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
163 # more fields
164
165 custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
166 if custom_fields_chunk is not None:
167 ctrlfile.write(custom_fields_chunk)
168 ctrlfile.write("\n")
169
170 mapping_rename_hook(localdata)
171
172 def debian_cmp_remap(var):
173             # In debian, a bare '>' and '<' do not mean what they appear to mean:
174             # '<' = less than or equal
175             # '>' = greater than or equal
176             # adjust these to the strict '<<' and '>>' equivalents
177 #
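            # e.g. an OE constraint "> 1.0" (strictly greater) must be written
            # ">> 1.0" in the control file, since dpkg reads a bare ">" as ">="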
178 for dep in var:
179 for i, v in enumerate(var[dep]):
180 if (v or "").startswith("< "):
181 var[dep][i] = var[dep][i].replace("< ", "<< ")
182 elif (v or "").startswith("> "):
183 var[dep][i] = var[dep][i].replace("> ", ">> ")
184
185 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
186 debian_cmp_remap(rdepends)
187 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
188 debian_cmp_remap(rrecommends)
189 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
190 debian_cmp_remap(rsuggests)
191 rprovides = bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES", True) or "")
192 debian_cmp_remap(rprovides)
193 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES", True) or "")
194 debian_cmp_remap(rreplaces)
195 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS", True) or "")
196 debian_cmp_remap(rconflicts)
197
198 if rdepends:
199 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
200 if rsuggests:
201 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
202 if rrecommends:
203 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
204 if rprovides:
205 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
206 if rreplaces:
207 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
208 if rconflicts:
209 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
210 src_uri = localdata.getVar("SRC_URI", True).strip() or "None"
211 if src_uri:
212 src_uri = re.sub("\s+", " ", src_uri)
213 ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
214 ctrlfile.close()
215
216 for script in ["preinst", "postinst", "prerm", "postrm"]:
217 scriptvar = localdata.getVar('pkg_%s' % script, True)
218 if not scriptvar:
219 continue
220 try:
221 scriptfile = open(os.path.join(controldir, script), 'w')
222             except (OSError, IOError):
223 bb.utils.unlockfile(lf)
224 raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
225 scriptfile.write(scriptvar)
226 scriptfile.close()
227 os.chmod(os.path.join(controldir, script), 0755)
228
229 conffiles_str = localdata.getVar("CONFFILES", True)
230 if conffiles_str:
231 try:
232 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
233             except (OSError, IOError):
234 bb.utils.unlockfile(lf)
235 raise bb.build.FuncFailed("unable to open conffiles for writing.")
236 for f in conffiles_str.split():
237 if os.path.exists(oe.path.join(root, f)):
238 conffiles.write('%s\n' % f)
239 conffiles.close()
240
241 os.chdir(basedir)
242         ret = subprocess.call("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
243                                                         d.getVar("OPKGBUILDCMD", True), pkg, pkgoutdir), shell=True)
244 if ret != 0:
245 bb.utils.unlockfile(lf)
246 raise bb.build.FuncFailed("opkg-build execution failed")
247
248 cleanupcontrol(root)
249 bb.utils.unlockfile(lf)
250
251}
252
253SSTATETASKS += "do_package_write_ipk"
254do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
255do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
256
257python do_package_write_ipk_setscene () {
258 tmpdir = d.getVar('TMPDIR', True)
259
260 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
261 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
262
263 sstate_setscene(d)
264}
265addtask do_package_write_ipk_setscene
266
267python () {
268 if d.getVar('PACKAGES', True) != '':
269 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
270 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
271 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
272}
273
274python do_package_write_ipk () {
275 bb.build.exec_func("read_subpackage_metadata", d)
276 bb.build.exec_func("do_package_ipk", d)
277}
278do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
279do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
280do_package_write_ipk[umask] = "022"
281addtask package_write_ipk after do_packagedata do_package
282
283PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
284PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
285
286do_build[recrdeptask] += "do_package_write_ipk"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
new file mode 100644
index 0000000000..92ddf7a30f
--- /dev/null
+++ b/meta/classes/package_rpm.bbclass
@@ -0,0 +1,754 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "rpm"
4
5RPM="rpm"
6RPMBUILD="rpmbuild"
7
8PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
9
10 # Maintaining the perfile dependencies has significant overhead when writing the
11# packages. When set, this value merges them for efficiency.
12MERGEPERFILEDEPS = "1"
13
14# Construct per file dependencies file
15def write_rpm_perfiledata(srcname, d):
16 workdir = d.getVar('WORKDIR', True)
17 packages = d.getVar('PACKAGES', True)
18 pkgd = d.getVar('PKGD', True)
19
20 def dump_filerdeps(varname, outfile, d):
21 outfile.write("#!/usr/bin/env python\n\n")
22 outfile.write("# Dependency table\n")
23 outfile.write('deps = {\n')
24 for pkg in packages.split():
25 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
26 dependsflist = (d.getVar(dependsflist_key, True) or "")
27 for dfile in dependsflist.split():
28 key = "FILE" + varname + "_" + dfile + "_" + pkg
29 depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
30 file = dfile.replace("@underscore@", "_")
31 file = file.replace("@closebrace@", "]")
32 file = file.replace("@openbrace@", "[")
33 file = file.replace("@tab@", "\t")
34 file = file.replace("@space@", " ")
35 file = file.replace("@at@", "@")
36 outfile.write('"' + pkgd + file + '" : "')
37 for dep in depends_dict:
38 ver = depends_dict[dep]
39 if dep and ver:
40 ver = ver.replace("(","")
41 ver = ver.replace(")","")
42 outfile.write(dep + " " + ver + " ")
43 else:
44 outfile.write(dep + " ")
45 outfile.write('",\n')
46 outfile.write('}\n\n')
47 outfile.write("import sys\n")
48 outfile.write("while 1:\n")
49 outfile.write("\tline = sys.stdin.readline().strip()\n")
50 outfile.write("\tif not line:\n")
51 outfile.write("\t\tsys.exit(0)\n")
52 outfile.write("\tif line in deps:\n")
53 outfile.write("\t\tprint(deps[line] + '\\n')\n")
54
55 # OE-core dependencies a.k.a. RPM requires
56 outdepends = workdir + "/" + srcname + ".requires"
57
58 try:
59 dependsfile = open(outdepends, 'w')
60     except (OSError, IOError):
61         raise bb.build.FuncFailed("unable to open requires file for writing.")
62
63 dump_filerdeps('RDEPENDS', dependsfile, d)
64
65 dependsfile.close()
66 os.chmod(outdepends, 0755)
67
68 # OE-core / RPM Provides
69 outprovides = workdir + "/" + srcname + ".provides"
70
71 try:
72 providesfile = open(outprovides, 'w')
73     except (OSError, IOError):
74         raise bb.build.FuncFailed("unable to open provides file for writing.")
75
76 dump_filerdeps('RPROVIDES', providesfile, d)
77
78 providesfile.close()
79 os.chmod(outprovides, 0755)
80
81 return (outdepends, outprovides)
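# The generated .requires/.provides scripts act as external dependency
# generators for rpmbuild: file names are fed in on stdin and the matching
# dependencies are printed back on stdout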
82
83
84python write_specfile () {
85 import oe.packagedata
86
87 # append information for logs and patches to %prep
88 def add_prep(d,spec_files_bottom):
89 if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
90 spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
91 spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
92 spec_files_bottom.append('')
93
94 # append the name of tarball to key word 'SOURCE' in xxx.spec.
95 def tail_source(d):
96 if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
97 ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
98 if not os.path.exists(ar_outdir):
99 return
100 source_list = os.listdir(ar_outdir)
101 source_number = 0
102 for source in source_list:
103                 # rpmbuild doesn't need root permission, but it needs to know
104                 # the files' user and group names; the only user and group
105                 # available when working in fakeroot is "root".
106 f = os.path.join(ar_outdir, source)
107 os.chown(f, 0, 0)
108 spec_preamble_top.append('Source%s: %s' % (source_number, source))
109 source_number += 1
110 # We need a simple way to remove the MLPREFIX from the package name,
111 # and dependency information...
112 def strip_multilib(name, d):
113 multilibs = d.getVar('MULTILIBS', True) or ""
114 for ext in multilibs.split():
115 eext = ext.split(':')
116 if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
117 name = "".join(name.split(eext[1] + '-'))
118 return name
119
120 def strip_multilib_deps(deps, d):
121 depends = bb.utils.explode_dep_versions2(deps or "")
122 newdeps = {}
123 for dep in depends:
124 newdeps[strip_multilib(dep, d)] = depends[dep]
125 return bb.utils.join_deps(newdeps)
126
127# ml = d.getVar("MLPREFIX", True)
128# if ml and name and len(ml) != 0 and name.find(ml) == 0:
129# return ml.join(name.split(ml, 1)[1:])
130# return name
131
132 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
133 # This format is similar to OE, however there are restrictions on the
134 # characters that can be in a field. In the Version field, "-"
135 # characters are not allowed. "-" is allowed in the Release field.
136 #
137 # We translate the "-" in the version to a "+", by loading the PKGV
138 # from the dependent recipe, replacing the - with a +, and then using
139 # that value to do a replace inside of this recipe's dependencies.
140 # This preserves the "-" separator between the version and release, as
141 # well as any "-" characters inside of the release field.
142 #
143 # All of this has to happen BEFORE the mapping_rename_hook as
144 # after renaming we cannot look up the dependencies in the packagedata
145 # store.
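    # Hypothetical example: if the dependent recipe has PKGV = "1.2-rc3", a
    # dependency "foo (>= 1.2-rc3-r0)" is rewritten to "foo (>= 1.2+rc3-r0)",
    # preserving the final "-" as the version/release separator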
146 def translate_vers(varname, d):
147 depends = d.getVar(varname, True)
148 if depends:
149 depends_dict = bb.utils.explode_dep_versions2(depends)
150 newdeps_dict = {}
151 for dep in depends_dict:
152 verlist = []
153 for ver in depends_dict[dep]:
154 if '-' in ver:
155 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
156 if 'PKGV' in subd:
157 pv = subd['PV']
158 pkgv = subd['PKGV']
159 reppv = pkgv.replace('-', '+')
160 ver = ver.replace(pv, reppv).replace(pkgv, reppv)
161 if 'PKGR' in subd:
162                             # Make sure ver uses PKGR rather than PR
163 pr = '-' + subd['PR']
164 pkgr = '-' + subd['PKGR']
165 if pkgr not in ver:
166 ver = ver.replace(pr, pkgr)
167 verlist.append(ver)
168 else:
169 verlist.append(ver)
170 newdeps_dict[dep] = verlist
171 depends = bb.utils.join_deps(newdeps_dict)
172 d.setVar(varname, depends.strip())
173
174     # We need to change the style of the dependencies from BB to RPM
175 # This needs to happen AFTER the mapping_rename_hook
176 def print_deps(variable, tag, array, d):
177 depends = variable
178 if depends:
179 depends_dict = bb.utils.explode_dep_versions2(depends)
180 for dep in depends_dict:
181 for ver in depends_dict[dep]:
182 ver = ver.replace('(', '')
183 ver = ver.replace(')', '')
184 array.append("%s: %s %s" % (tag, dep, ver))
185 if not len(depends_dict[dep]):
186 array.append("%s: %s" % (tag, dep))
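    # e.g. print_deps("foo (>= 1.0) bar", "Requires", array, d) appends
    # "Requires: foo >= 1.0" and "Requires: bar" to array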
187
188 def walk_files(walkpath, target, conffiles, dirfiles):
189         # We can race against the ipk/deb backends, which create CONTROL or DEBIAN
190         # directories when packaging; we just ignore these files, which are created in
191         # packages-split/ and not package/.
192         # We have the odd situation where a CONTROL/DEBIAN directory can be removed in the
193         # middle of the walk; the isdir() test would then fail and the walk code would
194         # assume it's a file, hence we check for the names in files too.
195 for rootpath, dirs, files in os.walk(walkpath):
196 path = rootpath.replace(walkpath, "")
197 if path.endswith("DEBIAN") or path.endswith("CONTROL"):
198 continue
199
200             # Directory handling can happen in two ways: either DIRFILES is not set at all,
201             # in which case we fall back to the older behaviour of packages owning all their
202             # directories
203 if dirfiles is None:
204 for dir in dirs:
205 if dir == "CONTROL" or dir == "DEBIAN":
206 continue
207 # All packages own the directories their files are in...
208 target.append('%dir "' + path + '/' + dir + '"')
209 else:
210                 # Otherwise, packages own only empty directories or directories explicitly
211                 # listed in DIRFILES; this prevents overlapping ownership and conflicting permissions.
212 if path and not files and not dirs:
213 target.append('%dir "' + path + '"')
214 elif path and path in dirfiles:
215 target.append('%dir "' + path + '"')
216
217 for file in files:
218 if file == "CONTROL" or file == "DEBIAN":
219 continue
220 if conffiles.count(path + '/' + file):
221 target.append('%config "' + path + '/' + file + '"')
222 else:
223 target.append('"' + path + '/' + file + '"')
224
225 # Prevent the prerm/postrm scripts from being run during an upgrade
226 def wrap_uninstall(scriptvar):
227 scr = scriptvar.strip()
228 if scr.startswith("#!"):
229 pos = scr.find("\n") + 1
230 else:
231 pos = 0
232 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
233 return scr
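    # e.g. wrap_uninstall("echo bye") returns 'if [ "$1" = "0" ] ; then\necho bye\nfi';
    # in rpm scriptlets $1 is the number of package instances remaining after
    # the operation, so 0 means complete removal rather than an upgrade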
234
235 def get_perfile(varname, pkg, d):
236 deps = []
237 dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
238 dependsflist = (d.getVar(dependsflist_key, True) or "")
239 for dfile in dependsflist.split():
240 key = "FILE" + varname + "_" + dfile + "_" + pkg
241 depends = d.getVar(key, True)
242 if depends:
243 deps.append(depends)
244 return " ".join(deps)
245
246 def append_description(spec_preamble, text):
247 """
248 Add the description to the spec file.
249 """
250 import textwrap
251 dedent_text = textwrap.dedent(text).strip()
252 # Bitbake saves "\n" as "\\n"
253 if '\\n' in dedent_text:
254 for t in dedent_text.split('\\n'):
255 spec_preamble.append(t.strip())
256 else:
257 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
258
259 packages = d.getVar('PACKAGES', True)
260 if not packages or packages == '':
261 bb.debug(1, "No packages; nothing to do")
262 return
263
264 pkgdest = d.getVar('PKGDEST', True)
265 if not pkgdest:
266 bb.fatal("No PKGDEST")
267
268 outspecfile = d.getVar('OUTSPECFILE', True)
269 if not outspecfile:
270 bb.fatal("No OUTSPECFILE")
271
272 # Construct the SPEC file...
273 srcname = strip_multilib(d.getVar('PN', True), d)
274 srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
275 srcversion = d.getVar('PKGV', True).replace('-', '+')
276 srcrelease = d.getVar('PKGR', True)
277 srcepoch = (d.getVar('PKGE', True) or "")
278 srclicense = d.getVar('LICENSE', True)
279 srcsection = d.getVar('SECTION', True)
280 srcmaintainer = d.getVar('MAINTAINER', True)
281 srchomepage = d.getVar('HOMEPAGE', True)
282 srcdescription = d.getVar('DESCRIPTION', True) or "."
283 srccustomtagschunk = get_package_additional_metadata("rpm", d)
284
285 srcdepends = strip_multilib_deps(d.getVar('DEPENDS', True), d)
286 srcrdepends = []
287 srcrrecommends = []
288 srcrsuggests = []
289 srcrprovides = []
290 srcrreplaces = []
291 srcrconflicts = []
292 srcrobsoletes = []
293
294 srcrpreinst = []
295 srcrpostinst = []
296 srcrprerm = []
297 srcrpostrm = []
298
299 spec_preamble_top = []
300 spec_preamble_bottom = []
301
302 spec_scriptlets_top = []
303 spec_scriptlets_bottom = []
304
305 spec_files_top = []
306 spec_files_bottom = []
307
308 perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
309 extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA", True) or "0") == "1"
310
311 for pkg in packages.split():
312 localdata = bb.data.createCopy(d)
313
314 root = "%s/%s" % (pkgdest, pkg)
315
316 localdata.setVar('ROOT', '')
317 localdata.setVar('ROOT_%s' % pkg, root)
318 pkgname = localdata.getVar('PKG_%s' % pkg, True)
319 if not pkgname:
320 pkgname = pkg
321 localdata.setVar('PKG', pkgname)
322
323 localdata.setVar('OVERRIDES', pkg)
324
325 bb.data.update_data(localdata)
326
327 conffiles = (localdata.getVar('CONFFILES', True) or "").split()
328 dirfiles = localdata.getVar('DIRFILES', True)
329 if dirfiles is not None:
330 dirfiles = dirfiles.split()
331
332 splitname = strip_multilib(pkgname, d)
333
334 splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
335 splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
336 splitrelease = (localdata.getVar('PKGR', True) or "")
337 splitepoch = (localdata.getVar('PKGE', True) or "")
338 splitlicense = (localdata.getVar('LICENSE', True) or "")
339 splitsection = (localdata.getVar('SECTION', True) or "")
340 splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
341 splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
342
343 translate_vers('RDEPENDS', localdata)
344 translate_vers('RRECOMMENDS', localdata)
345 translate_vers('RSUGGESTS', localdata)
346 translate_vers('RPROVIDES', localdata)
347 translate_vers('RREPLACES', localdata)
348 translate_vers('RCONFLICTS', localdata)
349
350 # Map the dependencies into their final form
351 mapping_rename_hook(localdata)
352
353 splitrdepends = strip_multilib_deps(localdata.getVar('RDEPENDS', True), d)
354 splitrrecommends = strip_multilib_deps(localdata.getVar('RRECOMMENDS', True), d)
355 splitrsuggests = strip_multilib_deps(localdata.getVar('RSUGGESTS', True), d)
356 splitrprovides = strip_multilib_deps(localdata.getVar('RPROVIDES', True), d)
357 splitrreplaces = strip_multilib_deps(localdata.getVar('RREPLACES', True), d)
358 splitrconflicts = strip_multilib_deps(localdata.getVar('RCONFLICTS', True), d)
359 splitrobsoletes = []
360
361 splitrpreinst = localdata.getVar('pkg_preinst', True)
362 splitrpostinst = localdata.getVar('pkg_postinst', True)
363 splitrprerm = localdata.getVar('pkg_prerm', True)
364 splitrpostrm = localdata.getVar('pkg_postrm', True)
365
366
367 if not perfiledeps:
368 # Add in summary of per file dependencies
369 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
370 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
371
372 # Gather special src/first package data
373 if srcname == splitname:
374 srcrdepends = splitrdepends
375 srcrrecommends = splitrrecommends
376 srcrsuggests = splitrsuggests
377 srcrprovides = splitrprovides
378 srcrreplaces = splitrreplaces
379 srcrconflicts = splitrconflicts
380
381 srcrpreinst = splitrpreinst
382 srcrpostinst = splitrpostinst
383 srcrprerm = splitrprerm
384 srcrpostrm = splitrpostrm
385
386 file_list = []
387 walk_files(root, file_list, conffiles, dirfiles)
388         if not file_list and localdata.getVar('ALLOW_EMPTY', True) != "1":
389 bb.note("Not creating empty RPM package for %s" % splitname)
390 else:
391 bb.note("Creating RPM package for %s" % splitname)
392 spec_files_top.append('%files')
393 if extra_pkgdata:
394 package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
395 spec_files_top.append('%defattr(-,-,-,-)')
396 if file_list:
397 bb.note("Creating RPM package for %s" % splitname)
398 spec_files_top.extend(file_list)
399 else:
400 bb.note("Creating EMPTY RPM Package for %s" % splitname)
401 spec_files_top.append('')
402 continue
403
404 # Process subpackage data
405 spec_preamble_bottom.append('%%package -n %s' % splitname)
406 spec_preamble_bottom.append('Summary: %s' % splitsummary)
407 if srcversion != splitversion:
408 spec_preamble_bottom.append('Version: %s' % splitversion)
409 if srcrelease != splitrelease:
410 spec_preamble_bottom.append('Release: %s' % splitrelease)
411 if srcepoch != splitepoch:
412 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
413 if srclicense != splitlicense:
414 spec_preamble_bottom.append('License: %s' % splitlicense)
415 spec_preamble_bottom.append('Group: %s' % splitsection)
416
417 if srccustomtagschunk != splitcustomtagschunk:
418 spec_preamble_bottom.append(splitcustomtagschunk)
419
420 # Replaces == Obsoletes && Provides
421 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
422 rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
423 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
424 for dep in rreplaces:
425 if not dep in robsoletes:
426 robsoletes[dep] = rreplaces[dep]
427 if not dep in rprovides:
428 rprovides[dep] = rreplaces[dep]
429 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
430 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
431
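# Illustration (hypothetical values): an entry RREPLACES = "foo (<= 1.0)"
# becomes both "Obsoletes: foo <= 1.0" and "Provides: foo <= 1.0" in the
# spec, unless foo already appears in the package's obsoletes/provides.
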
432 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
433 if splitrpreinst:
434 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
435 if splitrpostinst:
436 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
437 if splitrprerm:
438 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
439 if splitrpostrm:
440 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
441
442 # Suggests in RPM are like recommends in OE-core!
443 print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
444 # There is no RPM analog for OE-core suggests, so emit them as Recommends for now
445 print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
446 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
447 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
448
449 # Conflicts cannot also be provides, so we need to filter those out.
450 if splitrconflicts:
451 depends_dict = bb.utils.explode_dep_versions2(splitrconflicts)
452 newdeps_dict = {}
453 for dep in depends_dict:
454 if dep not in splitrprovides:
455 newdeps_dict[dep] = depends_dict[dep]
456 if newdeps_dict:
457 splitrconflicts = bb.utils.join_deps(newdeps_dict)
458 else:
459 splitrconflicts = ""
460
461 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
462
463 spec_preamble_bottom.append('')
464
465 spec_preamble_bottom.append('%%description -n %s' % splitname)
466 append_description(spec_preamble_bottom, splitdescription)
467
468 spec_preamble_bottom.append('')
469
470 # Now process scriptlets
471 if splitrpreinst:
472 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
473 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
474 spec_scriptlets_bottom.append(splitrpreinst)
475 spec_scriptlets_bottom.append('')
476 if splitrpostinst:
477 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
478 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
479 spec_scriptlets_bottom.append(splitrpostinst)
480 spec_scriptlets_bottom.append('')
481 if splitrprerm:
482 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
483 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
484 scriptvar = wrap_uninstall(splitrprerm)
485 spec_scriptlets_bottom.append(scriptvar)
486 spec_scriptlets_bottom.append('')
487 if splitrpostrm:
488 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
489 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
490 scriptvar = wrap_uninstall(splitrpostrm)
491 spec_scriptlets_bottom.append(scriptvar)
492 spec_scriptlets_bottom.append('')
493
494 # Now process files
495 file_list = []
496 walk_files(root, file_list, conffiles, dirfiles)
497 if not file_list and localdata.getVar('ALLOW_EMPTY', True) != "1":
498 bb.note("Not creating empty RPM package for %s" % splitname)
499 else:
500 spec_files_bottom.append('%%files -n %s' % splitname)
501 if extra_pkgdata:
502 package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
503 spec_files_bottom.append('%defattr(-,-,-,-)')
504 if file_list:
505 bb.note("Creating RPM package for %s" % splitname)
506 spec_files_bottom.extend(file_list)
507 else:
508 bb.note("Creating EMPTY RPM Package for %s" % splitname)
509 spec_files_bottom.append('')
510
511 del localdata
512
513 add_prep(d, spec_files_bottom)
514 spec_preamble_top.append('Summary: %s' % srcsummary)
515 spec_preamble_top.append('Name: %s' % srcname)
516 spec_preamble_top.append('Version: %s' % srcversion)
517 spec_preamble_top.append('Release: %s' % srcrelease)
518 if srcepoch and srcepoch.strip() != "":
519 spec_preamble_top.append('Epoch: %s' % srcepoch)
520 spec_preamble_top.append('License: %s' % srclicense)
521 spec_preamble_top.append('Group: %s' % srcsection)
522 spec_preamble_top.append('Packager: %s' % srcmaintainer)
523 if srchomepage:
524 spec_preamble_top.append('URL: %s' % srchomepage)
525 if srccustomtagschunk:
526 spec_preamble_top.append(srccustomtagschunk)
527 tail_source(d)
528
529 # Replaces == Obsoletes && Provides
530 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
531 rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
532 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
533 for dep in rreplaces:
534 if not dep in robsoletes:
535 robsoletes[dep] = rreplaces[dep]
536 if not dep in rprovides:
537 rprovides[dep] = rreplaces[dep]
538 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
539 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
540
541 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
542 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
543 if srcrpreinst:
544 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
545 if srcrpostinst:
546 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
547 if srcrprerm:
548 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
549 if srcrpostrm:
550 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
551
552 # Suggests in RPM are like recommends in OE-core!
553 print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
554 # There is no RPM analog for OE-core suggests, so emit them as Recommends for now
555 print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
556 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
557 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
558
559 # Conflicts cannot also be provides, so we need to filter those out.
560 if srcrconflicts:
561 depends_dict = bb.utils.explode_dep_versions2(srcrconflicts)
562 newdeps_dict = {}
563 for dep in depends_dict:
564 if dep not in srcrprovides:
565 newdeps_dict[dep] = depends_dict[dep]
566 if newdeps_dict:
567 srcrconflicts = bb.utils.join_deps(newdeps_dict)
568 else:
569 srcrconflicts = ""
570
571 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
572
573 spec_preamble_top.append('')
574
575 spec_preamble_top.append('%description')
576 append_description(spec_preamble_top, srcdescription)
577
578 spec_preamble_top.append('')
579
580 if srcrpreinst:
581 spec_scriptlets_top.append('%pre')
582 spec_scriptlets_top.append('# %s - preinst' % srcname)
583 spec_scriptlets_top.append(srcrpreinst)
584 spec_scriptlets_top.append('')
585 if srcrpostinst:
586 spec_scriptlets_top.append('%post')
587 spec_scriptlets_top.append('# %s - postinst' % srcname)
588 spec_scriptlets_top.append(srcrpostinst)
589 spec_scriptlets_top.append('')
590 if srcrprerm:
591 spec_scriptlets_top.append('%preun')
592 spec_scriptlets_top.append('# %s - prerm' % srcname)
593 scriptvar = wrap_uninstall(srcrprerm)
594 spec_scriptlets_top.append(scriptvar)
595 spec_scriptlets_top.append('')
596 if srcrpostrm:
597 spec_scriptlets_top.append('%postun')
598 spec_scriptlets_top.append('# %s - postrm' % srcname)
599 scriptvar = wrap_uninstall(srcrpostrm)
600 spec_scriptlets_top.append(scriptvar)
601 spec_scriptlets_top.append('')
602
603 # Write the SPEC file
604 try:
605 specfile = open(outspecfile, 'w')
606 except OSError:
607 raise bb.build.FuncFailed("unable to open spec file for writing.")
608
609 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
610 # of the generated spec file
611 external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
612 if external_preamble:
613 specfile.write(external_preamble + "\n")
614
615 for line in spec_preamble_top:
616 specfile.write(line + "\n")
617
618 for line in spec_preamble_bottom:
619 specfile.write(line + "\n")
620
621 for line in spec_scriptlets_top:
622 specfile.write(line + "\n")
623
624 for line in spec_scriptlets_bottom:
625 specfile.write(line + "\n")
626
627 for line in spec_files_top:
628 specfile.write(line + "\n")
629
630 for line in spec_files_bottom:
631 specfile.write(line + "\n")
632
633 specfile.close()
634}
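
# Example usage (hypothetical value) of the RPMSPEC_PREAMBLE hook read by
# write_specfile above; any text set there is emitted verbatim at the top
# of the generated spec file:
#
#   RPMSPEC_PREAMBLE = "# spec generated by OpenEmbedded - do not edit"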
635
636python do_package_rpm () {
637 # We need a simple way to remove the MLPREFIX from the package name
638 # and from the dependency information...
639 def strip_multilib(name, d):
640 ml = d.getVar("MLPREFIX", True)
641 if ml and name and len(ml) != 0 and name.find(ml) >= 0:
642 return "".join(name.split(ml))
643 return name
644
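# Illustration (hypothetical values): with MLPREFIX = "lib32-",
# strip_multilib("lib32-glibc", d) returns "glibc"; a name without the
# prefix is returned unchanged.
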
645 workdir = d.getVar('WORKDIR', True)
646 tmpdir = d.getVar('TMPDIR', True)
647 pkgd = d.getVar('PKGD', True)
648 pkgdest = d.getVar('PKGDEST', True)
649 if not workdir or not pkgd or not tmpdir:
650 bb.error("Variables incorrectly set, unable to package")
651 return
652
653 packages = d.getVar('PACKAGES', True)
654 if not packages or packages == '':
655 bb.debug(1, "No packages; nothing to do")
656 return
657
658 # Construct the spec file...
659 # If the spec file already exists but has not been stored in
660 # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
661 # so remove it before running rpmbuild.
662 srcname = strip_multilib(d.getVar('PN', True), d)
663 outspecfile = workdir + "/" + srcname + ".spec"
664 if os.path.isfile(outspecfile):
665 os.remove(outspecfile)
666 d.setVar('OUTSPECFILE', outspecfile)
667 bb.build.exec_func('write_specfile', d)
668
669 perfiledeps = (d.getVar("MERGEPERFILEDEPS", True) or "0") == "0"
670 if perfiledeps:
671 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
672
673 # Setup the rpmbuild arguments...
674 rpmbuild = d.getVar('RPMBUILD', True)
675 targetsys = d.getVar('TARGET_SYS', True)
676 targetvendor = d.getVar('HOST_VENDOR', True)
677 package_arch = (d.getVar('PACKAGE_ARCH', True) or "").replace("-", "_")
678 sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX', True) or "nativesdk").replace("-", "_")
679 if package_arch not in "all any noarch".split() and not package_arch.endswith(sdkpkgsuffix):
680 ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
681 d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
682 else:
683 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
684 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
685 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-${HOST_OS}')
686 magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
687 bb.utils.mkdirhier(pkgwritedir)
688 os.chmod(pkgwritedir, 0755)
689
690 cmd = rpmbuild
691 cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
692 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
693 cmd = cmd + " --define '_builddir " + d.getVar('S', True) + "'"
694 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
695 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
696 if perfiledeps:
697 cmd = cmd + " --define '__find_requires " + outdepends + "'"
698 cmd = cmd + " --define '__find_provides " + outprovides + "'"
699 else:
700 cmd = cmd + " --define '__find_requires %{nil}'"
701 cmd = cmd + " --define '__find_provides %{nil}'"
702 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
703 cmd = cmd + " --define 'debug_package %{nil}'"
704 cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
705 cmd = cmd + " --define '_tmppath " + workdir + "'"
706 if d.getVarFlag('ARCHIVER_MODE', 'srpm', True) == '1' and bb.data.inherits_class('archiver', d):
707 cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
708 cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR', True) + "'"
709 cmdsrpm = cmdsrpm + " -bs " + outspecfile
710 # Build the .src.rpm
711 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
712 d.setVarFlag('SBUILDSPEC', 'func', '1')
713 bb.build.exec_func('SBUILDSPEC', d)
714 cmd = cmd + " -bb " + outspecfile
715
716 # Build the rpm package!
717 d.setVar('BUILDSPEC', cmd + "\n")
718 d.setVarFlag('BUILDSPEC', 'func', '1')
719 bb.build.exec_func('BUILDSPEC', d)
720}
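
# For reference, the assembled command has roughly this shape (hypothetical
# paths and names):
#
#   rpmbuild --nodeps --short-circuit --target <pkgarch> --buildroot <PKGD> \
#       --define '_topdir <WORKDIR>' --define '_rpmdir <pkgwritedir>' \
#       ... -bb <WORKDIR>/<PN>.spec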
721
722python () {
723 if d.getVar('PACKAGES', True) != '':
724 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
725 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
726 d.setVarFlag('do_package_write_rpm', 'fakeroot', 1)
727}
728
729SSTATETASKS += "do_package_write_rpm"
730do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
731do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
732# Take a shared lock, we can write multiple packages at the same time...
733# but we need to stop the rootfs/solver from running while we do...
734do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
735
736python do_package_write_rpm_setscene () {
737 sstate_setscene(d)
738}
739addtask do_package_write_rpm_setscene
740
741python do_package_write_rpm () {
742 bb.build.exec_func("read_subpackage_metadata", d)
743 bb.build.exec_func("do_package_rpm", d)
744}
745
746do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
747do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
748do_package_write_rpm[umask] = "022"
749addtask package_write_rpm after do_packagedata do_package
750
751PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
752PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
753
754do_build[recrdeptask] += "do_package_write_rpm"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
new file mode 100644
index 0000000000..fed2c28b69
--- /dev/null
+++ b/meta/classes/package_tar.bbclass
@@ -0,0 +1,69 @@
1inherit package
2
3IMAGE_PKGTYPE ?= "tar"
4
5python do_package_tar () {
6 import subprocess
7 workdir = d.getVar('WORKDIR', True)
8 if not workdir:
9 bb.error("WORKDIR not defined, unable to package")
10 return
11
12 outdir = d.getVar('DEPLOY_DIR_TAR', True)
13 if not outdir:
14 bb.error("DEPLOY_DIR_TAR not defined, unable to package")
15 return
16
17 dvar = d.getVar('D', True)
18 if not dvar:
19 bb.error("D not defined, unable to package")
20 return
21
22 packages = d.getVar('PACKAGES', True)
23 if not packages:
24 bb.debug(1, "PACKAGES not defined, nothing to package")
25 return
26
27 pkgdest = d.getVar('PKGDEST', True)
28
29 bb.utils.mkdirhier(outdir)
30 bb.utils.mkdirhier(dvar)
31
32 for pkg in packages.split():
33 localdata = bb.data.createCopy(d)
34 root = "%s/%s" % (pkgdest, pkg)
35
36 overrides = localdata.getVar('OVERRIDES')
37 localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
38 bb.data.update_data(localdata)
39
40 bb.utils.mkdirhier(root)
41 basedir = os.path.dirname(root)
42 tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
43 os.chdir(root)
44 dlist = os.listdir(root)
45 if not dlist:
46 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
47 continue
48 args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
49 ret = subprocess.call(args + [tarfn] + dlist)
50 if ret != 0:
51 bb.error("Creation of tar %s failed." % tarfn)
52}
53
54python () {
55 if d.getVar('PACKAGES', True) != '':
56 deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
57 deps.append('tar-native:do_populate_sysroot')
58 deps.append('virtual/fakeroot-native:do_populate_sysroot')
59 d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
60 d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
61}
62
63
64python do_package_write_tar () {
65 bb.build.exec_func("read_subpackage_metadata", d)
66 bb.build.exec_func("do_package_tar", d)
67}
68do_package_write_tar[dirs] = "${D}"
69addtask package_write_tar before do_build after do_packagedata do_package
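
# Illustration (hypothetical names): for a package "foo" with PKGV = "1.0"
# and PKGR = "r0", the loop above produces
# ${DEPLOY_DIR_TAR}/foo-1.0-r0.tar.gz via roughly:
#
#   tar -cz --exclude=CONTROL --exclude=DEBIAN -f <tarfn> <package contents>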
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
new file mode 100644
index 0000000000..d1aedf2289
--- /dev/null
+++ b/meta/classes/packagedata.bbclass
@@ -0,0 +1,26 @@
1python read_subpackage_metadata () {
2 import oe.packagedata
3
4 vars = {
5 "PN" : d.getVar('PN', True),
6 "PE" : d.getVar('PE', True),
7 "PV" : d.getVar('PV', True),
8 "PR" : d.getVar('PR', True),
9 }
10
11 data = oe.packagedata.read_pkgdata(vars["PN"], d)
12
13 for key in data.keys():
14 d.setVar(key, data[key])
15
16 for pkg in d.getVar('PACKAGES', True).split():
17 sdata = oe.packagedata.read_subpkgdata(pkg, d)
18 for key in sdata.keys():
19 if key in vars:
20 if sdata[key] != vars[key]:
21 if key == "PN":
22 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
23 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
24 continue
25 d.setVar(key, sdata[key])
26}
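
# Illustration (hypothetical entry): a pkgdata file for a package "foo"
# typically carries lines such as "PKG_foo: foo" and "FILES_foo: /usr/bin/foo";
# oe.packagedata.read_subpkgdata() returns them as a dict whose entries are
# then set on the datastore above.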
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
new file mode 100644
index 0000000000..56cfead82a
--- /dev/null
+++ b/meta/classes/packagegroup.bbclass
@@ -0,0 +1,52 @@
1# Class for packagegroup (package group) recipes
2
3# By default, only the packagegroup package itself is in PACKAGES.
4# -dbg and -dev flavours are handled by the anonfunc below.
5# This means that packagegroup recipes used to build multiple packagegroup
6# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
7PACKAGES = "${PN}"
8
9 # By default, packagegroup packages are architecture-independent.
10 # Only if their dependencies are modified by MACHINE_FEATURES does
11 # PACKAGE_ARCH need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass.
12PACKAGE_ARCH ?= "all"
13
14# Fully expanded - so it applies the overrides as well
15PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
16
17inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED', True) == 'all', 'allarch', '')}
18
19# This automatically adds -dbg and -dev flavours of all PACKAGES
20# to the list. Their dependencies (RRECOMMENDS) are handled as usual
21# by package_depchains in a following step.
22# Also mark all packages as ALLOW_EMPTY
23python () {
24 packages = d.getVar('PACKAGES', True).split()
25 genpackages = []
26 for pkg in packages:
27 d.setVar("ALLOW_EMPTY_%s" % pkg, "1")
28 for postfix in ['-dbg', '-dev', '-ptest']:
29 genpackages.append(pkg+postfix)
30 if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
31 d.setVar('PACKAGES', ' '.join(packages+genpackages))
32}
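
# Illustration: with PACKAGES = "packagegroup-foo", the anonymous function
# above expands the list to "packagegroup-foo packagegroup-foo-dbg
# packagegroup-foo-dev packagegroup-foo-ptest", unless
# PACKAGEGROUP_DISABLE_COMPLEMENTARY = "1" is set.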
33
34# We don't want to look at shared library dependencies for the
35# dbg packages
36DEPCHAIN_DBGDEFAULTDEPS = "1"
37
38# We only need the packaging tasks - disable the rest
39do_fetch[noexec] = "1"
40do_unpack[noexec] = "1"
41do_patch[noexec] = "1"
42do_configure[noexec] = "1"
43do_compile[noexec] = "1"
44do_install[noexec] = "1"
45do_populate_sysroot[noexec] = "1"
46
47python () {
48 initman = d.getVar("VIRTUAL-RUNTIME_init_manager", True)
49 if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
50 bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
51}
52
diff --git a/meta/classes/packageinfo.bbclass b/meta/classes/packageinfo.bbclass
new file mode 100644
index 0000000000..7d60ace1dc
--- /dev/null
+++ b/meta/classes/packageinfo.bbclass
@@ -0,0 +1,22 @@
1python packageinfo_handler () {
2 import oe.packagedata
3 pkginfolist = []
4
5 pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + '/runtime/'
6 if os.path.exists(pkgdata_dir):
7 for root, dirs, files in os.walk(pkgdata_dir):
8 for pkgname in files:
9 if pkgname.endswith('.packaged'):
10 pkgname = pkgname[:-9]
11 pkgdatafile = root + pkgname
12 try:
13 sdata = oe.packagedata.read_pkgdatafile(pkgdatafile)
14 sdata['PKG'] = pkgname
15 pkginfolist.append(sdata)
16 except Exception as e:
17 bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, e.__class__, str(e)))
18 bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data)
19}
20
21addhandler packageinfo_handler
22packageinfo_handler[eventmask] = "bb.event.RequestPackageInfo"
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
new file mode 100644
index 0000000000..1e2aab0418
--- /dev/null
+++ b/meta/classes/patch.bbclass
@@ -0,0 +1,188 @@
1# Copyright (C) 2006 OpenedHand LTD
2
3# Point to an empty file so any user's custom settings don't break things
4QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
5
6PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
7
8inherit terminal
9
10def src_patches(d, all=False):
11 workdir = d.getVar('WORKDIR', True)
12 fetch = bb.fetch2.Fetch([], d)
13 patches = []
14 sources = []
15 for url in fetch.urls:
16 local = patch_path(url, fetch, workdir)
17 if not local:
18 if all:
19 local = fetch.localpath(url)
20 sources.append(local)
21 continue
22
23 urldata = fetch.ud[url]
24 parm = urldata.parm
25 patchname = parm.get('pname') or os.path.basename(local)
26
27 apply, reason = should_apply(parm, d)
28 if not apply:
29 if reason:
30 bb.note("Patch %s %s" % (patchname, reason))
31 continue
32
33 patchparm = {'patchname': patchname}
34 if "striplevel" in parm:
35 striplevel = parm["striplevel"]
36 elif "pnum" in parm:
37 #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
38 striplevel = parm["pnum"]
39 else:
40 striplevel = '1'
41 patchparm['striplevel'] = striplevel
42
43 patchdir = parm.get('patchdir')
44 if patchdir:
45 patchparm['patchdir'] = patchdir
46
47 localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
48 patches.append(localurl)
49
50 if all:
51 return sources
52
53 return patches
54
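# Example SRC_URI entries (hypothetical recipe) showing the url parameters
# honoured by src_patches() above and should_apply() below:
#
#   SRC_URI += "file://fix-build.patch;striplevel=2;patchdir=lib"
#   SRC_URI += "file://optional.patch;apply=no"
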
55def patch_path(url, fetch, workdir):
56 """Return the local path of a patch, or None if this isn't a patch"""
57
58 local = fetch.localpath(url)
59 base, ext = os.path.splitext(os.path.basename(local))
60 if ext in ('.gz', '.bz2', '.Z'):
61 local = os.path.join(workdir, base)
62 ext = os.path.splitext(base)[1]
63
64 urldata = fetch.ud[url]
65 if "apply" in urldata.parm:
66 apply = oe.types.boolean(urldata.parm["apply"])
67 if not apply:
68 return
69 elif ext not in (".diff", ".patch"):
70 return
71
72 return local
73
74def should_apply(parm, d):
75 """Determine if we should apply the given patch"""
76
77 if "mindate" in parm or "maxdate" in parm:
78 pn = d.getVar('PN', True)
79 srcdate = d.getVar('SRCDATE_%s' % pn, True)
80 if not srcdate:
81 srcdate = d.getVar('SRCDATE', True)
82
83 if srcdate == "now":
84 srcdate = d.getVar('DATE', True)
85
86 if "maxdate" in parm and parm["maxdate"] < srcdate:
87 return False, 'is outdated'
88
89 if "mindate" in parm and parm["mindate"] > srcdate:
90 return False, 'is predated'
91
92
93 if "minrev" in parm:
94 srcrev = d.getVar('SRCREV', True)
95 if srcrev and srcrev < parm["minrev"]:
96 return False, 'applies to later revisions'
97
98 if "maxrev" in parm:
99 srcrev = d.getVar('SRCREV', True)
100 if srcrev and srcrev > parm["maxrev"]:
101 return False, 'applies to earlier revisions'
102
103 if "rev" in parm:
104 srcrev = d.getVar('SRCREV', True)
105 if srcrev and parm["rev"] not in srcrev:
106 return False, "doesn't apply to revision"
107
108 if "notrev" in parm:
109 srcrev = d.getVar('SRCREV', True)
110 if srcrev and parm["notrev"] in srcrev:
111 return False, "doesn't apply to revision"
112
113 return True, None
114
115should_apply[vardepsexclude] = "DATE SRCDATE"
116
117python patch_do_patch() {
118 import oe.patch
119
120 patchsetmap = {
121 "patch": oe.patch.PatchTree,
122 "quilt": oe.patch.QuiltTree,
123 "git": oe.patch.GitApplyTree,
124 }
125
126 cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
127
128 resolvermap = {
129 "noop": oe.patch.NOOPResolver,
130 "user": oe.patch.UserResolver,
131 }
132
133 rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
134
135 classes = {}
136
137 s = d.getVar('S', True)
138
139 path = os.getenv('PATH')
140 os.putenv('PATH', d.getVar('PATH', True))
141
142 # We must use one TMPDIR per process so that the "patch" processes
143 # don't generate the same temp file name.
144
145 import tempfile
146 process_tmpdir = tempfile.mkdtemp()
147 os.environ['TMPDIR'] = process_tmpdir
148
149 for patch in src_patches(d):
150 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
151
152 if "patchdir" in parm:
153 patchdir = parm["patchdir"]
154 if not os.path.isabs(patchdir):
155 patchdir = os.path.join(s, patchdir)
156 else:
157 patchdir = s
158
159 if not patchdir in classes:
160 patchset = cls(patchdir, d)
161 resolver = rcls(patchset, oe_terminal)
162 classes[patchdir] = (patchset, resolver)
163 patchset.Clean()
164 else:
165 patchset, resolver = classes[patchdir]
166
167 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
168 try:
169 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
170 except Exception as exc:
171 bb.utils.remove(process_tmpdir, True)
172 bb.fatal(str(exc))
173 try:
174 resolver.Resolve()
175 except bb.BBHandledException as e:
176 bb.utils.remove(process_tmpdir, True)
177 bb.fatal(str(e))
178
179 bb.utils.remove(process_tmpdir, True)
180 del os.environ['TMPDIR']
181}
182patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
183
184addtask patch after do_unpack
185do_patch[dirs] = "${WORKDIR}"
186do_patch[depends] = "${PATCHDEPENDENCY}"
187
188EXPORT_FUNCTIONS do_patch
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
new file mode 100644
index 0000000000..cc8de8b381
--- /dev/null
+++ b/meta/classes/perlnative.bbclass
@@ -0,0 +1,3 @@
1EXTRANATIVEPATH += "perl-native"
2DEPENDS += "perl-native"
3OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
new file mode 100644
index 0000000000..b8d75bd38c
--- /dev/null
+++ b/meta/classes/pixbufcache.bbclass
@@ -0,0 +1,72 @@
1#
2# This class will generate the proper postinst/postrm scriptlets for pixbuf
3# packages.
4#
5
6DEPENDS += "qemu-native"
7inherit qemu
8
9PIXBUF_PACKAGES ??= "${PN}"
10
11pixbufcache_common() {
12if [ "x$D" != "x" ]; then
13 $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
14 bindir=${bindir} base_libdir=${base_libdir}
15else
16
17 # Update the pixbuf loaders in case they haven't been registered yet
18 GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
19
20 if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
21 for icondir in /usr/share/icons/*; do
22 if [ -d ${icondir} ]; then
23 gtk-update-icon-cache -t -q ${icondir}
24 fi
25 done
26 fi
27fi
28}
29
30python populate_packages_append() {
31 pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES', True).split()
32
33 for pkg in pixbuf_pkgs:
34 bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
35 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
36 if not postinst:
37 postinst = '#!/bin/sh\n'
38 postinst += d.getVar('pixbufcache_common', True)
39 d.setVar('pkg_postinst_%s' % pkg, postinst)
40
41 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
42 if not postrm:
43 postrm = '#!/bin/sh\n'
44 postrm += d.getVar('pixbufcache_common', True)
45 d.setVar('pkg_postrm_%s' % pkg, postrm)
46}
47
48#
49# Add an sstate postinst hook to update the cache for native packages.
50# An error exit during populate_sysroot_setscene allows bitbake to
51# try to recover by re-building the package.
52#
53SSTATEPOSTINSTFUNCS_append_class-native = " pixbufcache_sstate_postinst"
54
55pixbufcache_sstate_postinst() {
56 if [ "${BB_CURRENTTASK}" = "populate_sysroot" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
57 then
58 GDK_PIXBUF_FATAL_LOADER=1 gdk-pixbuf-query-loaders --update-cache || exit 1
59 fi
60}
61
62# Add all of the dependencies of gdk-pixbuf as dependencies of
63# do_populate_sysroot_setscene so that pixbufcache_sstate_postinst can work
64# (otherwise gdk-pixbuf-query-loaders may not exist or link). Only add
65# gdk-pixbuf-native if we're not building gdk-pixbuf itself.
66#
67# Packages that use this class should extend this variable with their runtime
68# dependencies.
69PIXBUFCACHE_SYSROOT_DEPS = ""
70PIXBUFCACHE_SYSROOT_DEPS_class-native = "${@['gdk-pixbuf-native:do_populate_sysroot_setscene', '']['${BPN}' == 'gdk-pixbuf']} glib-2.0-native:do_populate_sysroot_setscene libffi-native:do_populate_sysroot_setscene libpng-native:do_populate_sysroot_setscene zlib-native:do_populate_sysroot_setscene"
71do_populate_sysroot_setscene[depends] += "${PIXBUFCACHE_SYSROOT_DEPS}"
72do_populate_sysroot[depends] += "${@d.getVar('PIXBUFCACHE_SYSROOT_DEPS', True).replace('_setscene','')}"
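
# Example usage (hypothetical recipe): a recipe that splits its pixbuf
# loaders into a separate package can point the scriptlets at it with:
#
#   PIXBUF_PACKAGES = "${PN}-loaders"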
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
new file mode 100644
index 0000000000..ad1f84f506
--- /dev/null
+++ b/meta/classes/pkgconfig.bbclass
@@ -0,0 +1,2 @@
1DEPENDS_prepend = "pkgconfig-native "
2
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
new file mode 100644
index 0000000000..f64a911b72
--- /dev/null
+++ b/meta/classes/populate_sdk.bbclass
@@ -0,0 +1,7 @@
1# The majority of populate_sdk is located in populate_sdk_base
2 # This chunk simply facilitates compatibility with SDK-only recipes.
3
4inherit populate_sdk_base
5
6addtask populate_sdk after do_install before do_build
7
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
new file mode 100644
index 0000000000..de72e32ed8
--- /dev/null
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -0,0 +1,164 @@
1inherit meta toolchain-scripts
2
3# Wildcards specifying complementary packages to install for every package that has been explicitly
4# installed into the rootfs
5COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
6COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
7COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
8COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
9COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
10
11def complementary_globs(featurevar, d):
12 all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
13 globs = []
14 features = set((d.getVar(featurevar, True) or '').split())
15 for name, glob in all_globs.items():
16 if name in features:
17 globs.append(glob)
18 return ' '.join(globs)
19
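# Illustration: with SDKIMAGE_FEATURES = "dev-pkgs dbg-pkgs" (the default
# below), complementary_globs("SDKIMAGE_FEATURES", d) returns "*-dev *-dbg".
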
20SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs"
21SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
22
23inherit rootfs_${IMAGE_PKGTYPE}
24
25SDK_DIR = "${WORKDIR}/sdk"
26SDK_OUTPUT = "${SDK_DIR}/image"
27SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
28
29B_task-populate-sdk = "${SDK_DIR}"
30
31SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
32
33TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
34TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
35TOOLCHAIN_TARGET_TASK ?= " \
36 ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
37 ${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target-dbg')} \
38 "
39TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
40TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
41
42SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
43SDK_DEPENDS = "virtual/fakeroot-native sed-native"
44
45 # We want REAL_MULTIMACH_TARGET_SYS to point to TUNE_PKGARCH, not PACKAGE_ARCH,
46 # as the latter could be set to MACHINE_ARCH
47REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
48
49PID = "${@os.getpid()}"
50
51EXCLUDE_FROM_WORLD = "1"
52
53SDK_PACKAGING_FUNC ?= "create_shar"
54SDK_POST_INSTALL_COMMAND ?= ""
55
56SDK_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.manifest"
57python write_target_sdk_manifest () {
58 from oe.sdk import sdk_list_installed_packages
59 sdkmanifestdir = os.path.dirname(d.getVar("SDK_MANIFEST", True))
60 if not os.path.exists(sdkmanifestdir):
61 bb.utils.mkdirhier(sdkmanifestdir)
62 with open(d.getVar('SDK_MANIFEST', True), 'w') as output:
63 output.write(sdk_list_installed_packages(d, True, 'ver'))
64}
65
66POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
67
68fakeroot python do_populate_sdk() {
69 from oe.sdk import populate_sdk
70 from oe.manifest import create_manifest, Manifest
71
72 pn = d.getVar('PN', True)
73 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
74 runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
75
76 ld = bb.data.createCopy(d)
77 ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
78 runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
79 runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
80 d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK", True))
81 d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", True))
82
83 # create target/host SDK manifests
84 create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
85 manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
86 create_manifest(d, manifest_dir=d.getVar('SDK_DIR', True),
87 manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
88
89 populate_sdk(d)
90
91 # Process DEFAULTTUNE
92 bb.build.exec_func("create_sdk_files", d)
93
94 bb.build.exec_func("tar_sdk", d)
95
96 bb.build.exec_func(d.getVar("SDK_PACKAGING_FUNC", True), d)
97}
98
99fakeroot create_sdk_files() {
100 cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
101
102 # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
103 # Escape special characters like '+' and '.' in the SDKPATH
104 escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
105 sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
106}
107
108SDKTAROPTS = "--owner=root --group=root -j"
109
110fakeroot tar_sdk() {
111 # Package it up
112 mkdir -p ${SDK_DEPLOY}
113 cd ${SDK_OUTPUT}/${SDKPATH}
114 tar ${SDKTAROPTS} -c --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 .
115}
116
117fakeroot create_shar() {
118 # copy in the template shar extractor script
119 cp ${COREBASE}/meta/files/toolchain-shar-template.sh ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
120
121 cat << "EOF" > ${T}/post_install_command
122${SDK_POST_INSTALL_COMMAND}
123EOF
124 sed -i -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
125
126 # substitute variables
127 sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
128 -e 's#@SDKPATH@#${SDKPATH}#g' \
129 -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
130 -e '/@SDK_POST_INSTALL_COMMAND@/d' \
131 ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
132
133 # add execution permission
134 chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
135
136 # append the SDK tarball
137 cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
138
139 # delete the old tarball, we don't need it anymore
140 rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2
141}
142
143populate_sdk_log_check() {
144 for target in $*
145 do
146 lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
147
148 echo "log_check: Using $lf_path as logfile"
149
150 if test -e "$lf_path"
151 then
152 ${IMAGE_PKGTYPE}_log_check $target $lf_path
153 else
154 echo "Cannot find logfile [$lf_path]"
155 fi
156 echo "Logfile is clean"
157 done
158}
159
160do_populate_sdk[dirs] = "${TOPDIR}"
161do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
162do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
163do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
164addtask populate_sdk
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
new file mode 100644
index 0000000000..5a1cb33c6a
--- /dev/null
+++ b/meta/classes/prexport.bbclass
@@ -0,0 +1,58 @@
1PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
2PRSERV_DUMPOPT_PKGARCH = ""
3PRSERV_DUMPOPT_CHECKSUM = ""
4PRSERV_DUMPOPT_COL = "0"
5
6PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
7PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
8
9python prexport_handler () {
10 import bb.event
11 if not e.data:
12 return
13
14 if isinstance(e, bb.event.RecipeParsed):
15 import oe.prservice
16 #get all PR values for the current PRAUTOINX
17 ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
18 ver = ver.replace('%','-')
19 retval = oe.prservice.prserv_dump_db(e.data)
20 if not retval:
21 bb.fatal("prexport_handler: export failed!")
22 (metainfo, datainfo) = retval
23 if not datainfo:
24 bb.warn("prexport_handler: No AUTOPR values found for %s" % ver)
25 return
26 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
27 if 'AUTOINC' in ver:
28 import re
29 srcpv = bb.fetch2.get_srcrev(e.data)
30 base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)]
31 e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver)
32 retval = oe.prservice.prserv_dump_db(e.data)
33 if not retval:
34 bb.fatal("prexport_handler: export failed!")
35 (metainfo, datainfo) = retval
36 oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
37 elif isinstance(e, bb.event.ParseStarted):
38 import bb.utils
39 import oe.prservice
40 oe.prservice.prserv_check_avail(e.data)
41 #remove dumpfile
42 bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
43 elif isinstance(e, bb.event.ParseCompleted):
44 import oe.prservice
45 #dump meta info of tables
46 d = e.data.createCopy()
47 d.setVar('PRSERV_DUMPOPT_COL', "1")
48 retval = oe.prservice.prserv_dump_db(d)
49 if not retval:
50 bb.error("prexport_handler: export failed!")
51 return
52 (metainfo, datainfo) = retval
53 oe.prservice.prserv_export_tofile(d, metainfo, None, True)
54
55}
56
57addhandler prexport_handler
58prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted"
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
new file mode 100644
index 0000000000..8ed45f03f0
--- /dev/null
+++ b/meta/classes/primport.bbclass
@@ -0,0 +1,21 @@
1python primport_handler () {
2 import bb.event
3 if not e.data:
4 return
5
6 if isinstance(e, bb.event.ParseCompleted):
7 import oe.prservice
8 #import all exported AUTOPR values
9 imported = oe.prservice.prserv_import_db(e.data)
10 if imported is None:
11 bb.fatal("import failed!")
12
13 for (version, pkgarch, checksum, value) in imported:
14 bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
15 elif isinstance(e, bb.event.ParseStarted):
16 import oe.prservice
17 oe.prservice.prserv_check_avail(e.data)
18}
19
20addhandler primport_handler
21primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted"
diff --git a/meta/classes/prserv.bbclass b/meta/classes/prserv.bbclass
new file mode 100644
index 0000000000..139597f9cb
--- /dev/null
+++ b/meta/classes/prserv.bbclass
@@ -0,0 +1,2 @@
1
2
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
new file mode 100644
index 0000000000..b2949af9bb
--- /dev/null
+++ b/meta/classes/ptest-gnome.bbclass
@@ -0,0 +1,8 @@
1inherit ptest
2
3EXTRA_OECONF_append_class-target = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
4
5FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
6 ${datadir}/installed-tests/"
7
8RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
new file mode 100644
index 0000000000..4e6f075efe
--- /dev/null
+++ b/meta/classes/ptest.bbclass
@@ -0,0 +1,62 @@
1SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
2DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
3This package contains a test directory ${PTEST_PATH} for package test purposes."
4
5PTEST_PATH ?= "${libdir}/${PN}/ptest"
6FILES_${PN}-ptest = "${PTEST_PATH}"
7SECTION_${PN}-ptest = "devel"
8ALLOW_EMPTY_${PN}-ptest = "1"
9PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
10PTEST_ENABLED_class-native = ""
11PTEST_ENABLED_class-nativesdk = ""
12PTEST_ENABLED_class-cross-canadian = ""
13RDEPENDS_${PN}-ptest_class-native = ""
14RDEPENDS_${PN}-ptest_class-nativesdk = ""
15
16PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
17
18do_configure_ptest() {
19 :
20}
21
22do_configure_ptest_base() {
23 do_configure_ptest
24}
25
26do_compile_ptest() {
27 :
28}
29
30do_compile_ptest_base() {
31 do_compile_ptest
32}
33
34do_install_ptest() {
35 :
36}
37
38do_install_ptest_base() {
39 if [ -f ${WORKDIR}/run-ptest ]; then
40 install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
41 if grep -q install-ptest: Makefile; then
42 oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
43 fi
44 do_install_ptest
45 fi
46}
47
48do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
49
50addtask configure_ptest_base after do_configure before do_compile
51addtask compile_ptest_base after do_compile before do_install
52addtask install_ptest_base after do_install before do_package do_populate_sysroot
53
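# A recipe normally provides a ${WORKDIR}/run-ptest script (hypothetical
# minimal example) that do_install_ptest_base installs above:
#
#   #!/bin/sh
#   make -C tests check
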
54python () {
55 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
56 d.setVarFlag('do_install_ptest_base', 'fakeroot', 1)
57
58 # Remove all '*ptest_base' tasks when ptest is not enabled
59 if d.getVar('PTEST_ENABLED', True) != "1":
60 for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
61 bb.build.deltask(i, d)
62}
diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass
new file mode 100644
index 0000000000..ebfa4b30f6
--- /dev/null
+++ b/meta/classes/python-dir.bbclass
@@ -0,0 +1,5 @@
1PYTHON_BASEVERSION ?= "2.7"
2PYTHON_ABI ?= ""
3PYTHON_DIR = "python${PYTHON_BASEVERSION}"
4PYTHON_PN = "python${@'' if '${PYTHON_BASEVERSION}'.startswith('2') else '3'}"
5PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
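
# Illustration: the default PYTHON_BASEVERSION = "2.7" yields
# PYTHON_PN = "python" and PYTHON_DIR = "python2.7"; a 3.x version (see
# python3native.bbclass) yields PYTHON_PN = "python3".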
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
new file mode 100644
index 0000000000..f86374fd33
--- /dev/null
+++ b/meta/classes/python3native.bbclass
@@ -0,0 +1,7 @@
1PYTHON_BASEVERSION = "3.3"
2
3inherit python-dir
4
5PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
6EXTRANATIVEPATH += "${PYTHON_PN}-native"
7DEPENDS += " ${PYTHON_PN}-native "
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
new file mode 100644
index 0000000000..fdd22bbc86
--- /dev/null
+++ b/meta/classes/pythonnative.bbclass
@@ -0,0 +1,6 @@
1
2inherit python-dir
3
4PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
5EXTRANATIVEPATH += "${PYTHON_PN}-native"
6DEPENDS += " ${PYTHON_PN}-native "
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
new file mode 100644
index 0000000000..b2cf85d628
--- /dev/null
+++ b/meta/classes/qemu.bbclass
@@ -0,0 +1,48 @@
1#
2# This class contains functions for recipes that need QEMU or test for its
3# existence.
4#
5
6def qemu_target_binary(data):
7 target_arch = data.getVar("TARGET_ARCH", True)
8 if target_arch in ("i486", "i586", "i686"):
9 target_arch = "i386"
10 elif target_arch == "powerpc":
11 target_arch = "ppc"
12 elif target_arch == "powerpc64":
13 target_arch = "ppc64"
14
15 return "qemu-" + target_arch
16#
17# The next function returns a string containing the command needed to
18# run a given binary through qemu. For example, if a postinstall
19# scriptlet must run at do_rootfs time and the postinstall is
20# architecture dependent, we can run it through qemu. In the
21# postinstall scriptlet, we could use the following:
22#
23# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
24#
25def qemu_run_binary(data, rootfs_path, binary):
26 qemu_binary = qemu_target_binary(data)
27 if qemu_binary == "qemu-allarch":
28 qemu_binary = "qemuwrapper"
29
30 libdir = rootfs_path + data.getVar("libdir", False)
31 base_libdir = rootfs_path + data.getVar("base_libdir", False)
32 oldest_kernel = data.getVar("OLDEST_KERNEL", True)
33
34 return "PSEUDO_UNLOAD=1 " + qemu_binary + " -r " + oldest_kernel + " -L " + rootfs_path\
35 + " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\
36 + rootfs_path + binary
37
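# Illustration (hypothetical values): with TARGET_ARCH = "arm" and
# OLDEST_KERNEL = "2.6.32", qemu_run_binary(d, '$D', '/usr/bin/test_app')
# returns:
#
#   PSEUDO_UNLOAD=1 qemu-arm -r 2.6.32 -L $D \
#       -E LD_LIBRARY_PATH=$D/usr/lib:$D/lib $D/usr/bin/test_app
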
38QEMU_OPTIONS = "-r ${OLDEST_KERNEL}"
39QEMU_OPTIONS_append_iwmmxt = " -cpu pxa270-c5"
40QEMU_OPTIONS_append_armv6 = " -cpu arm1136"
41QEMU_OPTIONS_append_armv7a = " -cpu cortex-a8"
42QEMU_OPTIONS_append_e500v2 = " -cpu e500v2"
43QEMU_OPTIONS_append_e500mc = " -cpu e500mc"
44QEMU_OPTIONS_append_e5500 = " -cpu e5500"
45QEMU_OPTIONS_append_e5500-64b = " -cpu e5500"
46QEMU_OPTIONS_append_e6500 = " -cpu e6500"
47QEMU_OPTIONS_append_e6500-64b = " -cpu e6500"
48QEMU_OPTIONS_append_ppc7400 = " -cpu 7400"
diff --git a/meta/classes/qmake2.bbclass b/meta/classes/qmake2.bbclass
new file mode 100644
index 0000000000..6e73ad2d1e
--- /dev/null
+++ b/meta/classes/qmake2.bbclass
@@ -0,0 +1,27 @@
1#
2# QMake variables for Qt4
3#
4inherit qmake_base
5
6DEPENDS_prepend = "qt4-tools-native "
7
8export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
9export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/qt4/mkspecs/qconfig.pri"
10export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
11export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34"
12export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4"
13export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4"
14export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4"
15export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4"
16export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2"
17export OE_QMAKE_LINK = "${CXX}"
18export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
19export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
20export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
21export OE_QMAKE_LIBS_QT = "qt"
22export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
23export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
24export OE_QMAKE_LCONVERT = "${STAGING_BINDIR_NATIVE}/lconvert4"
25export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
26export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"
27export OE_QMAKE_XMLPATTERNS = "${STAGING_BINDIR_NATIVE}/xmlpatterns4"
diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass
new file mode 100644
index 0000000000..86bbede260
--- /dev/null
+++ b/meta/classes/qmake_base.bbclass
@@ -0,0 +1,119 @@
1QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake"
2
3OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
4QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
5
6# We override this completely to eliminate the -e normally passed in
7EXTRA_OEMAKE = ""
8
9export OE_QMAKE_CC="${CC}"
10export OE_QMAKE_CFLAGS="${CFLAGS}"
11export OE_QMAKE_CXX="${CXX}"
12export OE_QMAKE_LDFLAGS="${LDFLAGS}"
13export OE_QMAKE_AR="${AR}"
14export OE_QMAKE_STRIP="echo"
15export OE_QMAKE_RPATH="-Wl,-rpath-link,"
16
17# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11
18
19oe_qmake_mkspecs () {
20 mkdir -p mkspecs/${OE_QMAKE_PLATFORM}
21 for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do
22 if [ -L $f ]; then
23 lnk=`readlink $f`
24 if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then
25 ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f`
26 else
27 cp $f mkspecs/${OE_QMAKE_PLATFORM}/
28 fi
29 else
30 cp $f mkspecs/${OE_QMAKE_PLATFORM}/
31 fi
32 done
33}
34
35do_generate_qt_config_file() {
36 export QT_CONF_PATH=${WORKDIR}/qt.conf
37 cat > ${WORKDIR}/qt.conf <<EOF
38[Paths]
39Prefix =
40Binaries = ${STAGING_BINDIR_NATIVE}
41Headers = ${STAGING_INCDIR}/qt4
42Plugins = ${STAGING_LIBDIR}/qt4/plugins/
43Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
44EOF
45}
46
47addtask generate_qt_config_file after do_patch before do_configure
48
49qmake_base_do_configure() {
50 case ${QMAKESPEC} in
51 *linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++|*linux-uclibceabi-oe-g++|*linux-gnuspe-oe-g++|*linux-uclibcspe-oe-g++|*linux-gnun32-oe-g++)
52 ;;
53 *-oe-g++)
54 die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
55 ;;
56 *)
57 bbnote Searching for qmake spec file
58 paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
59 paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
60
61 if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
62 paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-x86-g++ $paths"
63 fi
64 for i in $paths; do
65 if test -e $i; then
66 export QMAKESPEC=$i
67 break
68 fi
69 done
70 ;;
71 esac
72
73 bbnote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
74
75 if [ -z "${QMAKE_PROFILES}" ]; then
76 PROFILES="`ls *.pro`"
77 else
78 PROFILES="${QMAKE_PROFILES}"
79 fi
80
81 if [ -z "$PROFILES" ]; then
82 die "QMAKE_PROFILES not set and no profiles found in $PWD"
83 fi
84
85 if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
86 AFTER="-after"
87 QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
88 bbnote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
89 fi
90
91 if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
92 QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
93 bbnote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
94 fi
95
96 # Hack .pro files to use OE utilities
97 LCONVERT_NAME=$(basename ${OE_QMAKE_LCONVERT})
98 LRELEASE_NAME=$(basename ${OE_QMAKE_LRELEASE})
99 LUPDATE_NAME=$(basename ${OE_QMAKE_LUPDATE})
100 XMLPATTERNS_NAME=$(basename ${OE_QMAKE_XMLPATTERNS})
101 find -name '*.pro' \
102 -exec sed -i -e "s|\(=\s*.*\)/$LCONVERT_NAME|\1/lconvert|g" \
103 -e "s|\(=\s*.*\)/$LRELEASE_NAME|\1/lrelease|g" \
104 -e "s|\(=\s*.*\)/$LUPDATE_NAME|\1/lupdate|g" \
105 -e "s|\(=\s*.*\)/$XMLPATTERNS_NAME|\1/xmlpatterns|g" \
106 -e "s|\(=\s*.*\)/lconvert|\1/$LCONVERT_NAME|g" \
107 -e "s|\(=\s*.*\)/lrelease|\1/$LRELEASE_NAME|g" \
108 -e "s|\(=\s*.*\)/lupdate|\1/$LUPDATE_NAME|g" \
109 -e "s|\(=\s*.*\)/xmlpatterns|\1/$XMLPATTERNS_NAME|g" \
110 '{}' ';'
111
112#bbnote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
113 unset QMAKESPEC || true
114 ${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
115}
116
117EXPORT_FUNCTIONS do_configure
118
119addtask configure after do_unpack do_patch before do_compile
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
new file mode 100644
index 0000000000..850bb6a717
--- /dev/null
+++ b/meta/classes/qt4e.bbclass
@@ -0,0 +1,24 @@
1QT4EDEPENDS ?= "qt4-embedded "
2DEPENDS_prepend = "${QT4EDEPENDS}"
3
4inherit qmake2
5
6QT_BASE_NAME = "qt4-embedded"
7QT_DIR_NAME = "qtopia"
8QT_LIBINFIX = "E"
9# override variables set by qmake-base to compile Qt/Embedded apps
10#
11export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
12export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/qconfig.pri"
13export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
14export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
15export OE_QMAKE_LIBS_QT = "qt"
16export OE_QMAKE_LIBS_X11 = ""
17export OE_QMAKE_EXTRA_MODULES = "network"
18EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
19
20# Qt4 uses atomic instructions not supported in thumb mode
21ARM_INSTRUCTION_SET = "arm"
22
23 # Qt4 can NOT be built on MIPS64 with a 64-bit userspace
24COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
new file mode 100644
index 0000000000..65d196afc6
--- /dev/null
+++ b/meta/classes/qt4x11.bbclass
@@ -0,0 +1,14 @@
1QT4DEPENDS ?= "qt4-x11 "
2DEPENDS_prepend = "${QT4DEPENDS}"
3
4inherit qmake2
5
6QT_BASE_NAME = "qt4"
7QT_DIR_NAME = "qt4"
8QT_LIBINFIX = ""
9
10# Qt4 uses atomic instructions not supported in thumb mode
11ARM_INSTRUCTION_SET = "arm"
12
13 # Qt4 can NOT be built on MIPS64 with a 64-bit userspace
14COMPATIBLE_HOST_mips64 = "mips64.*-linux-gnun32"
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
new file mode 100644
index 0000000000..ee04e333db
--- /dev/null
+++ b/meta/classes/recipe_sanity.bbclass
@@ -0,0 +1,167 @@
1def __note(msg, d):
2 bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
3
4__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
5def bad_runtime_vars(cfgdata, d):
6 if bb.data.inherits_class("native", d) or \
7 bb.data.inherits_class("cross", d):
8 return
9
10 for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
11 val = d.getVar(var, 0)
12 if val and val != cfgdata.get(var):
13 __note("%s should be %s_${PN}" % (var, var), d)
14
15__recipe_sanity_reqvars = "DESCRIPTION"
16__recipe_sanity_reqdiffvars = ""
17def req_vars(cfgdata, d):
18 for var in d.getVar("__recipe_sanity_reqvars", True).split():
19 if not d.getVar(var, 0):
20 __note("%s should be set" % var, d)
21
22 for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
23 val = d.getVar(var, 0)
24 cfgval = cfgdata.get(var)
25
26 if not val:
27 __note("%s should be set" % var, d)
28 elif val == cfgval:
29 __note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
30
31def var_renames_overwrite(cfgdata, d):
32 renames = d.getVar("__recipe_sanity_renames", 0)
33 if renames:
34 for (key, newkey, oldvalue, newvalue) in renames:
35 if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
36 __note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
37
38def incorrect_nonempty_PACKAGES(cfgdata, d):
39 if bb.data.inherits_class("native", d) or \
40 bb.data.inherits_class("cross", d):
41 if d.getVar("PACKAGES", True):
42 return True
43
44def can_use_autotools_base(cfgdata, d):
45 cfg = d.getVar("do_configure", True)
46 if not bb.data.inherits_class("autotools", d):
47 return False
48
49 for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
50 if cfg.find(i) != -1:
51 return False
52
53 for clsfile in d.getVar("__inherit_cache", 0):
54 (base, _) = os.path.splitext(os.path.basename(clsfile))
55 if cfg.find("%s_do_configure" % base) != -1:
56 __note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
57
58 return True
59
60def can_delete_FILESPATH(cfgdata, d):
61 expected = cfgdata.get("FILESPATH")
62 expectedpaths = d.expand(expected)
63 unexpanded = d.getVar("FILESPATH", 0)
64 filespath = d.getVar("FILESPATH", True).split(":")
65 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
66 for fp in filespath:
67 if not fp in expectedpaths:
68 # __note("Path %s in FILESPATH not in the expected paths %s" %
69 # (fp, expectedpaths), d)
70 return False
71 return expected != unexpanded
72
73def can_delete_FILESDIR(cfgdata, d):
74 expected = cfgdata.get("FILESDIR")
75 #expected = "${@bb.utils.which(d.getVar('FILESPATH', True), '.')}"
76 unexpanded = d.getVar("FILESDIR", 0)
77 if unexpanded is None:
78 return False
79
80 expanded = os.path.normpath(d.getVar("FILESDIR", True))
81 filespath = d.getVar("FILESPATH", True).split(":")
82 filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
83
84 return unexpanded != expected and \
85 os.path.exists(expanded) and \
86 (expanded in filespath or
87 expanded == d.expand(expected))
88
89def can_delete_others(p, cfgdata, d):
90 for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
91 "SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
92 #for k in cfgdata:
93 unexpanded = d.getVar(k, 0)
94 cfgunexpanded = cfgdata.get(k)
95 if not cfgunexpanded:
96 continue
97
98 try:
99 expanded = d.getVar(k, True)
100 cfgexpanded = d.expand(cfgunexpanded)
101 except bb.fetch.ParameterError:
102 continue
103
104 if unexpanded != cfgunexpanded and \
105 cfgexpanded == expanded:
106 __note("candidate for removal of %s" % k, d)
107 bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
108 (p, cfgunexpanded, unexpanded, expanded))
109
110python do_recipe_sanity () {
111 p = d.getVar("P", True)
112 p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
113
114 sanitychecks = [
115 (can_delete_FILESDIR, "candidate for removal of FILESDIR"),
116 (can_delete_FILESPATH, "candidate for removal of FILESPATH"),
117 #(can_use_autotools_base, "candidate for use of autotools_base"),
118 (incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
119 ]
120 cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
121
122 for (func, msg) in sanitychecks:
123 if func(cfgdata, d):
124 __note(msg, d)
125
126 can_delete_others(p, cfgdata, d)
127 var_renames_overwrite(cfgdata, d)
128 req_vars(cfgdata, d)
129 bad_runtime_vars(cfgdata, d)
130}
131do_recipe_sanity[nostamp] = "1"
132addtask recipe_sanity
133
134do_recipe_sanity_all[nostamp] = "1"
135do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity_all do_recipe_sanity"
136do_recipe_sanity_all () {
137 :
138}
139addtask recipe_sanity_all after do_recipe_sanity
140
141python recipe_sanity_eh () {
142 d = e.data
143
144 cfgdata = {}
145 for k in d.keys():
146 #for k in ["S", "PR", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
147 # "SECTION"]:
148 cfgdata[k] = d.getVar(k, 0)
149
150 d.setVar("__recipe_sanity_cfgdata", cfgdata)
151 #d.setVar("__recipe_sanity_cfgdata", d)
152
153 # Sick, very sick..
154 from bb.data_smart import DataSmart
155 old = DataSmart.renameVar
156 def myrename(self, key, newkey):
157 oldvalue = self.getVar(newkey, 0)
158 old(self, key, newkey)
159 newvalue = self.getVar(newkey, 0)
160 if oldvalue:
161 renames = self.getVar("__recipe_sanity_renames", 0) or set()
162 renames.add((key, newkey, oldvalue, newvalue))
163 self.setVar("__recipe_sanity_renames", renames)
164 DataSmart.renameVar = myrename
165}
166addhandler recipe_sanity_eh
167recipe_sanity_eh[eventmask] = "bb.event.ConfigParsed"
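Because do_recipe_sanity is registered as a nostamp task (with a recursive
recipe_sanity_all wrapper), the checks can be run on demand once the class is
inherited. A typical invocation might look like this (illustrative; assumes
the class is enabled globally in local.conf):

    # conf/local.conf
    INHERIT += "recipe_sanity"

    # then, from the build directory:
    #   bitbake -c recipe_sanity <recipe>        # check a single recipe
    #   bitbake -c recipe_sanity_all <target>    # recurse over its dependencies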
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
new file mode 100644
index 0000000000..4ca9981f44
--- /dev/null
+++ b/meta/classes/relocatable.bbclass
@@ -0,0 +1,7 @@
1inherit chrpath
2
3SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
4
5python relocatable_binaries_preprocess() {
6 rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
7}
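rpath_replace comes from the inherited chrpath class; conceptually it rewrites
the RPATH of each ELF binary under SYSROOT_DESTDIR so the entries become
$ORIGIN-relative, making the staged sysroot relocatable. A minimal stand-alone
sketch of the idea (a hypothetical helper, not the class's actual code; assumes
the chrpath tool is on PATH):

    import os, subprocess

    def make_rpath_relative(binary, libdir):
        # Compute the library directory relative to the binary's own
        # location, then rewrite the RPATH against $ORIGIN.
        rel = os.path.relpath(libdir, os.path.dirname(binary))
        subprocess.call(["chrpath", "-r", "$ORIGIN/" + rel, binary])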
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
new file mode 100644
index 0000000000..8b30422edf
--- /dev/null
+++ b/meta/classes/report-error.bbclass
@@ -0,0 +1,70 @@
1#
2# Collects debug information in order to create error report files.
3#
4# Copyright (C) 2013 Intel Corporation
5# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
6#
7# Licensed under the MIT license, see COPYING.MIT for details
8
9ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
10
11def errorreport_getdata(e):
12 logpath = e.data.getVar('ERR_REPORT_DIR', True)
13 datafile = os.path.join(logpath, "error-report.txt")
14 with open(datafile) as f:
15 data = f.read()
16 return data
17
18def errorreport_savedata(e, newdata, file):
19 import json
20 logpath = e.data.getVar('ERR_REPORT_DIR', True)
21 bb.utils.mkdirhier(logpath)
22 datafile = os.path.join(logpath, file)
23 with open(datafile, "w") as f:
24 json.dump(newdata, f, indent=4, sort_keys=True)
25 return datafile
26
27python errorreport_handler () {
28 import json
29
30 if isinstance(e, bb.event.BuildStarted):
31 data = {}
32        machine = e.data.getVar("MACHINE", True)
33 data['machine'] = machine
34 data['build_sys'] = e.data.getVar("BUILD_SYS", True)
35        data['nativelsb'] = e.data.getVar("NATIVELSBSTRING", True)
36        data['distro'] = e.data.getVar("DISTRO", True)
37 data['target_sys'] = e.data.getVar("TARGET_SYS", True)
38 data['failures'] = []
39 data['component'] = e.getPkgs()[0]
40 data['branch_commit'] = base_detect_branch(e.data) + ": " + base_detect_revision(e.data)
41 errorreport_savedata(e, data, "error-report.txt")
42
43 elif isinstance(e, bb.build.TaskFailed):
44 task = e.task
45 taskdata={}
46 log = e.data.getVar('BB_LOGFILE', True)
47 taskdata['package'] = e.data.expand("${PF}")
48 taskdata['task'] = task
49 if log:
50 logFile = open(log, 'r')
51 taskdata['log'] = logFile.read()
52 logFile.close()
53 else:
54 taskdata['log'] = "No Log"
55 jsondata = json.loads(errorreport_getdata(e))
56 jsondata['failures'].append(taskdata)
57 errorreport_savedata(e, jsondata, "error-report.txt")
58
59 elif isinstance(e, bb.event.BuildCompleted):
60 jsondata = json.loads(errorreport_getdata(e))
61 failures = jsondata['failures']
62        if len(failures) > 0:
63            filename = "error_report_" + e.data.getVar("BUILDNAME", True) + ".txt"
64 datafile = errorreport_savedata(e, jsondata, filename)
65 bb.note("The errors for this build are stored in %s\nYou can send the errors to an upstream server by running:\n send-error-report %s [server]" % (datafile, datafile))
66 bb.note("The contents of these logs will be posted in public if you use the above command with the default server. If you need to do so, please ensure you remove any identifying or proprietary information before sending.")
67}
68
69addhandler errorreport_handler
70errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed"
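The handler builds the report incrementally: BuildStarted seeds the metadata,
each TaskFailed appends an entry to 'failures', and BuildCompleted writes the
final per-build file. The resulting JSON therefore has roughly this shape
(values illustrative):

    {
        "machine": "qemux86",
        "build_sys": "x86_64-linux",
        "nativelsb": "Ubuntu-14.04",
        "distro": "poky",
        "target_sys": "i586-poky-linux",
        "component": "core-image-minimal",
        "branch_commit": "master: <revision>",
        "failures": [
            {"package": "foo-1.0-r0", "task": "do_compile", "log": "..."}
        ]
    }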
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
new file mode 100644
index 0000000000..e68d02a783
--- /dev/null
+++ b/meta/classes/rm_work.bbclass
@@ -0,0 +1,120 @@
1#
2# Removes source after build
3#
4# To use it, add this line to conf/local.conf:
5#
6# INHERIT += "rm_work"
7#
8# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE.
9# For example, in conf/local.conf:
10#
11# RM_WORK_EXCLUDE += "icu-native icu busybox"
12#
13
14# Use the completion scheduler by default when rm_work is active
15# to try and reduce disk usage
16BB_SCHEDULER ?= "completion"
17
18RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
19BB_DEFAULT_TASK = "rm_work_all"
20
21do_rm_work () {
22 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
23 for p in ${RM_WORK_EXCLUDE}; do
24 if [ "$p" = "${PN}" ]; then
25 bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE"
26 exit 0
27 fi
28 done
29
30 cd ${WORKDIR}
31 for dir in *
32 do
33        # Retain only logs and other files in temp; safely ignore
34        # failures when removing pseudo folders on an NFS 2/3 server.
35 if [ $dir = 'pseudo' ]; then
36 rm -rf $dir 2> /dev/null || true
37 elif [ $dir != 'temp' ]; then
38 rm -rf $dir
39 fi
40 done
41
42    # Need to add pseudo back or subsequent work in this workdir
43    # might fail since setscene may not rerun to recreate it
44 mkdir -p ${WORKDIR}/pseudo/
45
46 # Change normal stamps into setscene stamps as they better reflect the
47 # fact WORKDIR is now empty
48 # Also leave noexec stamps since setscene stamps don't cover them
49 cd `dirname ${STAMP}`
50 for i in `basename ${STAMP}`*
51 do
52 for j in ${SSTATETASKS}
53 do
54 case $i in
55 *do_setscene*)
56 break
57 ;;
58 *sigdata*)
59 i=dummy
60 break
61 ;;
62 *do_package_write*)
63 i=dummy
64 break
65 ;;
66 *do_build*)
67 i=dummy
68 break
69 ;;
70 # We remove do_package entirely, including any
71 # sstate version since otherwise we'd need to leave 'plaindirs' around
72 # such as 'packages' and 'packages-split' and these can be large. No end
73 # of chain tasks depend directly on do_package anymore.
74 *do_package|*do_package.*|*do_package_setscene.*)
75 rm -f $i;
76 i=dummy
77 break
78 ;;
79 *_setscene*)
80 i=dummy
81 break
82 ;;
83 *$j|*$j.*)
84 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
85 i=dummy
86 break
87 ;;
88 esac
89 done
90 rm -f $i
91 done
92}
93addtask rm_work after do_${RMWORK_ORIG_TASK}
94
95do_rm_work_all () {
96 :
97}
98do_rm_work_all[recrdeptask] = "do_rm_work"
99addtask rm_work_all after do_rm_work
100
101do_populate_sdk[postfuncs] += "rm_work_populatesdk"
102rm_work_populatesdk () {
103 :
104}
105rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk"
106
107do_rootfs[postfuncs] += "rm_work_rootfs"
108rm_work_rootfs () {
109 :
110}
111rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
112
113python () {
114 # If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
115 excludes = (d.getVar("RM_WORK_EXCLUDE", True) or "").split()
116 pn = d.getVar("PN", True)
117 if pn in excludes:
118 d.delVarFlag('rm_work_rootfs', 'cleandirs')
119 d.delVarFlag('rm_work_populatesdk', 'cleandirs')
120}
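The stamp-conversion loop is the subtle part: once WORKDIR has been emptied,
ordinary task stamps would wrongly suggest the work still exists, so stamps
for sstate-capable tasks are renamed to their _setscene form. Schematically
(stamp names illustrative):

    before:  tmp/stamps/.../foo-1.0-r0.do_populate_sysroot.<hash>
    after:   tmp/stamps/.../foo-1.0-r0.do_populate_sysroot_setscene.<hash>

do_package stamps (including their sstate variants) are deleted outright,
while *.sigdata, *_setscene, do_package_write* and do_build stamps are left
untouched.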
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
new file mode 100644
index 0000000000..d51b4582d2
--- /dev/null
+++ b/meta/classes/rootfs_deb.bbclass
@@ -0,0 +1,39 @@
1#
2# Copyright 2006-2007 Openedhand Ltd.
3#
4
5ROOTFS_PKGMANAGE = "dpkg apt"
6ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
7
8do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
9do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
10do_rootfs[recrdeptask] += "do_package_write_deb"
11rootfs_deb_do_rootfs[vardepsexclude] += "BUILDNAME"
12do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
13
14do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
15do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
16
17python rootfs_deb_bad_recommendations() {
18 if d.getVar("BAD_RECOMMENDATIONS", True):
19 bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
20}
21do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
22
23DEB_POSTPROCESS_COMMANDS = ""
24
25opkglibdir = "${localstatedir}/lib/opkg"
26
27python () {
28    # Map SDK_ARCH to Debian's ideas about architectures
29 darch = d.getVar('SDK_ARCH', True)
30 if darch in ["x86", "i486", "i586", "i686", "pentium"]:
31 d.setVar('DEB_SDK_ARCH', 'i386')
32 elif darch == "x86_64":
33 d.setVar('DEB_SDK_ARCH', 'amd64')
34 elif darch == "arm":
35 d.setVar('DEB_SDK_ARCH', 'armel')
36}
37
38# This will of course only work after rootfs_deb_do_rootfs or populate_sdk_deb has been called
39DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg-query --admindir=$INSTALL_ROOTFS_DEB/var/lib/dpkg"
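As the final comment notes, DPKG_QUERY_COMMAND is only usable once a
Debian-style rootfs exists and INSTALL_ROOTFS_DEB points at it; the installed
package set can then be listed in the usual dpkg-query fashion, for example
(illustrative, from a rootfs postprocess step):

    ${DPKG_QUERY_COMMAND} -W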
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
new file mode 100644
index 0000000000..6139cc7d59
--- /dev/null
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -0,0 +1,39 @@
1#
2# Creates a root filesystem out of IPKs
3#
4# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs2 image, etc.
5# See image.bbclass for an example of its use.
6#
7
8EXTRAOPKGCONFIG ?= ""
9ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}"
10ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
11
12do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
13do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
14do_rootfs[recrdeptask] += "do_package_write_ipk"
15do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
16rootfs_ipk_do_rootfs[vardepsexclude] += "BUILDNAME"
17
18do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
19do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
20
21OPKG_PREPROCESS_COMMANDS = ""
22
23OPKG_POSTPROCESS_COMMANDS = ""
24
25OPKGLIBDIR = "${localstatedir}/lib"
26
27MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
28
29python () {
30
31 if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
32 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
33 flags = flags.replace("do_package_write_ipk", "")
34 flags = flags.replace("do_deploy", "")
35 flags = flags.replace("do_populate_sysroot", "")
36 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
37 d.setVar('OPKG_PREPROCESS_COMMANDS', "")
38 d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
39}
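The anonymous Python above strips the package-writing and deploy dependencies
when images are assembled from pre-built package feeds; that mode is opt-in
from configuration, e.g. (illustrative):

    # conf/local.conf
    BUILD_IMAGES_FROM_FEEDS = "1"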
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
new file mode 100644
index 0000000000..d85d001a62
--- /dev/null
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -0,0 +1,47 @@
1#
2# Creates a root filesystem out of rpm packages
3#
4
5ROOTFS_PKGMANAGE = "rpm smartpm"
6ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
7
8# Add 50MB of extra space for Smart
9IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 51200", "", d)}"
10
11# Smart is python based, so be sure python-native is available to us.
12EXTRANATIVEPATH += "python-native"
13
14# opkg is needed for update-alternatives
15RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
16 rpmresolve-native:do_populate_sysroot \
17 python-smartpm-native:do_populate_sysroot \
18 createrepo-native:do_populate_sysroot \
19 opkg-native:do_populate_sysroot"
20
21do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
22do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
23
24do_rootfs[recrdeptask] += "do_package_write_rpm"
25rootfs_rpm_do_rootfs[vardepsexclude] += "BUILDNAME"
26do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
27
28# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
29# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
30do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
31do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
32
33python () {
34 if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
35 flags = d.getVarFlag('do_rootfs', 'recrdeptask')
36 flags = flags.replace("do_package_write_rpm", "")
37 flags = flags.replace("do_deploy", "")
38 flags = flags.replace("do_populate_sysroot", "")
39 d.setVarFlag('do_rootfs', 'recrdeptask', flags)
40 d.setVar('RPM_PREPROCESS_COMMANDS', '')
41 d.setVar('RPM_POSTPROCESS_COMMANDS', '')
42
43}
46
47rpmlibdir = "/var/lib/rpm"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
new file mode 100644
index 0000000000..5be5efb8a4
--- /dev/null
+++ b/meta/classes/sanity.bbclass
@@ -0,0 +1,887 @@
1#
2# Sanity check the users setup for common misconfigurations
3#
4
5SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
6 gzip gawk chrpath wget cpio perl"
7
8def bblayers_conf_file(d):
9 return os.path.join(d.getVar('TOPDIR', True), 'conf/bblayers.conf')
10
11def sanity_conf_read(fn):
12 with open(fn, 'r') as f:
13 lines = f.readlines()
14 return lines
15
16def sanity_conf_find_line(pattern, lines):
17 import re
18 return next(((index, line)
19 for index, line in enumerate(lines)
20 if re.search(pattern, line)), (None, None))
21
22def sanity_conf_update(fn, lines, version_var_name, new_version):
23 index, line = sanity_conf_find_line(version_var_name, lines)
24 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
25 with open(fn, "w") as f:
26 f.write(''.join(lines))
27
28# Functions added to this variable MUST throw an exception (or sys.exit()) unless they
29# successfully changed LCONF_VERSION in bblayers.conf
30BBLAYERS_CONF_UPDATE_FUNCS += "oecore_update_bblayers"
31
32python oecore_update_bblayers() {
33 # bblayers.conf is out of date, so see if we can resolve that
34
35 current_lconf = int(d.getVar('LCONF_VERSION', True))
36 if not current_lconf:
37 sys.exit()
38 lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
39 lines = []
40
41 if current_lconf < 4:
42 sys.exit()
43
44 bblayers_fn = bblayers_conf_file(d)
45 lines = sanity_conf_read(bblayers_fn)
46
47 if current_lconf == 4 and lconf_version > 4:
48 topdir_var = '$' + '{TOPDIR}'
49 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
50 if bbpath_line:
51 start = bbpath_line.find('"')
52 if start != -1 and (len(bbpath_line) != (start + 1)):
53 if bbpath_line[start + 1] == '"':
54 lines[index] = (bbpath_line[:start + 1] +
55 topdir_var + bbpath_line[start + 1:])
56 else:
57 if not topdir_var in bbpath_line:
58 lines[index] = (bbpath_line[:start + 1] +
59 topdir_var + ':' + bbpath_line[start + 1:])
60 else:
61 sys.exit()
62 else:
63 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
64 if bbfiles_line:
65 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
66 else:
67 sys.exit()
68
69 current_lconf += 1
70 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
71 return
72
73 sys.exit()
74}
75
76def raise_sanity_error(msg, d, network_error=False):
77 if d.getVar("SANITY_USE_EVENTS", True) == "1":
78 try:
79 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
80 except TypeError:
81 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
82 return
83
84 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
85 Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
86 Following is the list of potential problems / advisories:
87
88 %s""" % msg)
89
90# Check flags associated with a tuning.
91def check_toolchain_tune_args(data, tune, multilib, errs):
92 found_errors = False
93 if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
94 found_errors = True
95 if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
96 found_errors = True
97 if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
98 found_errors = True
99
100 return found_errors
101
102def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
103 args_set = (data.getVar("TUNE_%s" % which, True) or "").split()
104 args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune), True) or "").split()
105 args_missing = []
106
107 # If no args are listed/required, we are done.
108 if not args_wanted:
109 return
110 for arg in args_wanted:
111 if arg not in args_set:
112 args_missing.append(arg)
113
114 found_errors = False
115 if args_missing:
116 found_errors = True
117 tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
118 (tune, ' '.join(args_missing), which, ' '.join(args_set)))
119 return found_errors
120
121# Check a single tune for validity.
122def check_toolchain_tune(data, tune, multilib):
123 tune_errors = []
124 if not tune:
125 return "No tuning found for %s multilib." % multilib
126 localdata = bb.data.createCopy(data)
127 if multilib != "default":
128 # Apply the overrides so we can look at the details.
129 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
130 localdata.setVar("OVERRIDES", overrides)
131 bb.data.update_data(localdata)
132 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
133 features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
134 if not features:
135 return "Tuning '%s' has no defined features, and cannot be used." % tune
136 valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
137 conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
138 # [doc] is the documentation for the variable, not a real feature
139 if 'doc' in valid_tunes:
140 del valid_tunes['doc']
141 if 'doc' in conflicts:
142 del conflicts['doc']
143 for feature in features:
144 if feature in conflicts:
145 for conflict in conflicts[feature].split():
146 if conflict in features:
147 tune_errors.append("Feature '%s' conflicts with '%s'." %
148 (feature, conflict))
149 if feature in valid_tunes:
150 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
151 else:
152 tune_errors.append("Feature '%s' is not defined." % feature)
153 whitelist = localdata.getVar("TUNEABI_WHITELIST", True)
154 if whitelist:
155 tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune, True)
156 if not tuneabi:
157 tuneabi = tune
158 if True not in [x in whitelist.split() for x in tuneabi.split()]:
159 tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
160 (tune, tuneabi))
161 else:
162 if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
163 bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
164 if tune_errors:
165 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
166
167def check_toolchain(data):
168 tune_error_set = []
169 deftune = data.getVar("DEFAULTTUNE", True)
170 tune_errors = check_toolchain_tune(data, deftune, 'default')
171 if tune_errors:
172 tune_error_set.append(tune_errors)
173
174 multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
175 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS", True) or "").split()
176
177 if multilibs:
178 seen_libs = []
179 seen_tunes = []
180 for lib in multilibs:
181 if lib in seen_libs:
182 tune_error_set.append("The multilib '%s' appears more than once." % lib)
183 else:
184 seen_libs.append(lib)
185 if not lib in global_multilibs:
186 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
187 tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
188 if tune in seen_tunes:
189 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
190 else:
191 seen_libs.append(tune)
192 if tune == deftune:
193 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
194 else:
195 tune_errors = check_toolchain_tune(data, tune, lib)
196 if tune_errors:
197 tune_error_set.append(tune_errors)
198 if tune_error_set:
199 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
200
201 return ""
202
203def check_conf_exists(fn, data):
204 bbpath = []
205 fn = data.expand(fn)
206 vbbpath = data.getVar("BBPATH")
207 if vbbpath:
208 bbpath += vbbpath.split(":")
209 for p in bbpath:
210 currname = os.path.join(data.expand(p), fn)
211 if os.access(currname, os.R_OK):
212 return True
213 return False
214
215def check_create_long_filename(filepath, pathname):
216 import string, random
217 testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
218 try:
219 if not os.path.exists(filepath):
220 bb.utils.mkdirhier(filepath)
221 f = open(testfile, "w")
222 f.close()
223 os.remove(testfile)
224 except IOError as e:
225 import errno
226 err, strerror = e.args
227 if err == errno.ENAMETOOLONG:
228 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
229 else:
230 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
231 except OSError as e:
232        err, strerror = e.args
233 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
234 return ""
235
236def check_path_length(filepath, pathname, limit):
237 if len(filepath) > limit:
238        return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
239 return ""
240
241def get_filesystem_id(path):
242 status, result = oe.utils.getstatusoutput("stat -f -c '%s' %s" % ("%t", path))
243 if status == 0:
244 return result
245 else:
246 bb.warn("Can't get the filesystem id of: %s" % path)
247 return None
248
249# Check that the path isn't located on NFS.
250def check_not_nfs(path, name):
251    # The NFS filesystem id is 6969
252    if get_filesystem_id(path) == "6969":
253        return "The %s: %s can't be located on NFS.\n" % (name, path)
254 return ""
255
256def check_connectivity(d):
257    # URIs to check can be set in the CONNECTIVITY_CHECK_URIS variable
258 # using the same syntax as for SRC_URI. If the variable is not set
259 # the check is skipped
260 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
261 retval = ""
262
263 # Only check connectivity if network enabled and the
264 # CONNECTIVITY_CHECK_URIS are set
265 network_enabled = not d.getVar('BB_NO_NETWORK', True)
266 check_enabled = len(test_uris)
267 # Take a copy of the data store and unset MIRRORS and PREMIRRORS
268 data = bb.data.createCopy(d)
269 data.delVar('PREMIRRORS')
270 data.delVar('MIRRORS')
271 if check_enabled and network_enabled:
272 try:
273 fetcher = bb.fetch2.Fetch(test_uris, data)
274 fetcher.checkstatus()
275 except Exception:
276 # Allow the message to be configured so that users can be
277 # pointed to a support mechanism.
278 msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
279 if len(msg) == 0:
280 msg = "Failed to fetch test data from the network. Please ensure your network is configured correctly.\n"
281 retval = msg
282
283 return retval
284
285def check_supported_distro(sanity_data):
286 from fnmatch import fnmatch
287
288 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS', True)
289 if not tested_distros:
290 return
291
292 try:
293 distro = oe.lsb.distro_identifier()
294 except Exception:
295 distro = None
296
297    if not distro:
298        bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
        return
299
300 for supported in [x.strip() for x in tested_distros.split('\\n')]:
301 if fnmatch(distro, supported):
302 return
303
304 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
305
306# Checks we should only make if MACHINE is set correctly
307def check_sanity_validmachine(sanity_data):
308 messages = ""
309
310 # Check TUNE_ARCH is set
311 if sanity_data.getVar('TUNE_ARCH', True) == 'INVALID':
312 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
313
314 # Check TARGET_OS is set
315 if sanity_data.getVar('TARGET_OS', True) == 'INVALID':
316 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
317
318 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
319 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
320 tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
321 tunefound = False
322 seen = {}
323 dups = []
324
325 for pa in pkgarchs.split():
326 if seen.get(pa, 0) == 1:
327 dups.append(pa)
328 else:
329 seen[pa] = 1
330 if pa == tunepkg:
331 tunefound = True
332
333 if len(dups):
334 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
335
336 if tunefound == False:
337 messages = messages + "Error, the PACKAGE_ARCHS variable does not contain TUNE_PKGARCH (%s)." % tunepkg
338
339 return messages
340
341# Check whether it is necessary to add the -march option to the host gcc flags
342def check_gcc_march(sanity_data):
343 result = True
344 message = ""
345
346 # Check if -march not in BUILD_CFLAGS
347    if sanity_data.getVar("BUILD_CFLAGS", True).find("-march") < 0:
348 result = False
349
350 # Construct a test file
351 f = open("gcc_test.c", "w")
352 f.write("int main (){ volatile int atomic = 2; __sync_bool_compare_and_swap (&atomic, 2, 3); return 0; }\n")
353 f.close()
354
355 # Check if GCC could work without march
356 if not result:
357 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc gcc_test.c -o gcc_test")
358 if status == 0:
359            result = True
360
361 if not result:
362 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=native gcc_test.c -o gcc_test")
363 if status == 0:
364 message = "BUILD_CFLAGS_append = \" -march=native\""
365            result = True
366
367 if not result:
368 build_arch = sanity_data.getVar('BUILD_ARCH', True)
369 status,res = oe.utils.getstatusoutput("${BUILD_PREFIX}gcc -march=%s gcc_test.c -o gcc_test" % build_arch)
370 if status == 0:
371 message = "BUILD_CFLAGS_append = \" -march=%s\"" % build_arch
372            result = True
373
374 os.remove("gcc_test.c")
375 if os.path.exists("gcc_test"):
376 os.remove("gcc_test")
377
378 return (result, message)
379
380# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
381# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
382def check_make_version(sanity_data):
383 from distutils.version import LooseVersion
384 status, result = oe.utils.getstatusoutput("make --version")
385 if status != 0:
386 return "Unable to execute make --version, exit code %s\n" % status
387 version = result.split()[2]
388 if LooseVersion(version) == LooseVersion("3.82"):
389 # Construct a test file
390 f = open("makefile_test", "w")
391 f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
392 f.write("\n")
393 f.write("makefile_test_a.c:\n")
394        f.write("\ttouch $@\n")
395 f.write("\n")
396 f.write("makefile_test_b.c:\n")
397        f.write("\ttouch $@\n")
398 f.close()
399
400 # Check if make 3.82 has been patched
401 status,result = oe.utils.getstatusoutput("make -f makefile_test")
402
403 os.remove("makefile_test")
404 if os.path.exists("makefile_test_a.c"):
405 os.remove("makefile_test_a.c")
406 if os.path.exists("makefile_test_b.c"):
407 os.remove("makefile_test_b.c")
408 if os.path.exists("makefile_test.a"):
409 os.remove("makefile_test.a")
410
411 if status != 0:
412 return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
413 return None
414
415
416# Tar version 1.24 and onwards handle overwriting symlinks correctly
417# but earlier versions do not; this needs to work properly for sstate
418def check_tar_version(sanity_data):
419 from distutils.version import LooseVersion
420 status, result = oe.utils.getstatusoutput("tar --version")
421 if status != 0:
422 return "Unable to execute tar --version, exit code %s\n" % status
423 version = result.split()[3]
424 if LooseVersion(version) < LooseVersion("1.24"):
425 return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
426 return None
427
428# We use git parameters and functionality only found in 1.7.8 or later
429def check_git_version(sanity_data):
430 from distutils.version import LooseVersion
431 status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
432 if status != 0:
433 return "Unable to execute git --version, exit code %s\n" % status
434 version = result.split()[2]
435 if LooseVersion(version) < LooseVersion("1.7.8"):
436 return "Your version of git is older than 1.7.8 and has bugs which will break builds. Please install a newer version of git.\n"
437 return None
438
439# Check the required perl modules which may not be installed by default
440def check_perl_modules(sanity_data):
441 ret = ""
442 modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
443 for m in modules:
444 status, result = oe.utils.getstatusoutput("perl -e 'use %s' 2> /dev/null" % m)
445 if status != 0:
446 ret += "%s " % m
447 if ret:
448 return "Required perl module(s) not found: %s\n" % ret
449 return None
450
451def sanity_check_conffiles(status, d):
452 # Check we are using a valid local.conf
453 current_conf = d.getVar('CONF_VERSION', True)
454 conf_version = d.getVar('LOCALCONF_VERSION', True)
455
456 if current_conf != conf_version:
457 status.addresult("Your version of local.conf was generated from an older/newer version of local.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/local.conf ${COREBASE}/meta*/conf/local.conf.sample\" is a good way to visualise the changes.\n")
458
459 # Check bblayers.conf is valid
460 current_lconf = d.getVar('LCONF_VERSION', True)
461 lconf_version = d.getVar('LAYER_CONF_VERSION', True)
462 if current_lconf != lconf_version:
463 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
464 for func in funcs:
465 success = True
466 try:
467 bb.build.exec_func(func, d)
468 except Exception:
469 success = False
470 if success:
471 bb.note("Your conf/bblayers.conf has been automatically updated.")
472 status.reparse = True
473 break
474 if not status.reparse:
475            status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version))
476
477 # If we have a site.conf, check it's valid
478 if check_conf_exists("conf/site.conf", d):
479 current_sconf = d.getVar('SCONF_VERSION', True)
480 sconf_version = d.getVar('SITE_CONF_VERSION', True)
481 if current_sconf != sconf_version:
482 status.addresult("Your version of site.conf was generated from an older version of site.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/site.conf ${COREBASE}/meta*/conf/site.conf.sample\" is a good way to visualise the changes.\n")
483
484
485def sanity_handle_abichanges(status, d):
486 #
487 # Check the 'ABI' of TMPDIR
488 #
489 current_abi = d.getVar('OELAYOUT_ABI', True)
490 abifile = d.getVar('SANITY_ABIFILE', True)
491 if os.path.exists(abifile):
492 with open(abifile, "r") as f:
493 abi = f.read().strip()
494 if not abi.isdigit():
495 with open(abifile, "w") as f:
496 f.write(current_abi)
497 elif abi == "2" and current_abi == "3":
498 bb.note("Converting staging from layout version 2 to layout version 3")
499 subprocess.call(d.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"), shell=True)
500 subprocess.call(d.expand("ln -s sysroots ${TMPDIR}/staging"), shell=True)
501 subprocess.call(d.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"), shell=True)
502 with open(abifile, "w") as f:
503 f.write(current_abi)
504 elif abi == "3" and current_abi == "4":
505 bb.note("Converting staging layout from version 3 to layout version 4")
506 if os.path.exists(d.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
507 subprocess.call(d.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"), shell=True)
508 subprocess.call(d.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"), shell=True)
509 with open(abifile, "w") as f:
510 f.write(current_abi)
511 elif abi == "4":
512 status.addresult("Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n")
513 elif abi == "5" and current_abi == "6":
514 bb.note("Converting staging layout from version 5 to layout version 6")
515 subprocess.call(d.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"), shell=True)
516 with open(abifile, "w") as f:
517 f.write(current_abi)
518 elif abi == "7" and current_abi == "8":
519 status.addresult("Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n")
520 elif (abi != current_abi and current_abi == "9"):
521        status.addresult("The layout of the TMPDIR STAMPS directory has changed. Please clean out TMPDIR and rebuild (sstate will still be valid and reused)\n")
522 elif (abi != current_abi):
523 # Code to convert from one ABI to another could go here if possible.
524 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
525 else:
526 with open(abifile, "w") as f:
527 f.write(current_abi)
528
529def check_sanity_sstate_dir_change(sstate_dir, data):
530 # Sanity checks to be done when the value of SSTATE_DIR changes
531
532 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
533 testmsg = ""
534 if sstate_dir != "":
535 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
536 # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
537 try:
538 err = testmsg.split(': ')[1].strip()
539 if err == "Permission denied.":
540 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
541 except IndexError:
542 pass
543 return testmsg
544
545def check_sanity_version_change(status, d):
546 # Sanity checks to be done when SANITY_VERSION changes
547 # In other words, these tests run once in a given build directory and then
548 # never again until the sanity version changes.
549
550    # Check the python install is complete. glib-2.0-native requires
551    # xml.parsers.expat
552 try:
553 import xml.parsers.expat
554 except ImportError:
555 status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
556 import stat
557
558 status.addresult(check_make_version(d))
559 status.addresult(check_tar_version(d))
560 status.addresult(check_git_version(d))
561 status.addresult(check_perl_modules(d))
562
563 missing = ""
564
565 if not check_app_exists("${MAKE}", d):
566 missing = missing + "GNU make,"
567
568 if not check_app_exists('${BUILD_PREFIX}gcc', d):
569 missing = missing + "C Compiler (%sgcc)," % d.getVar("BUILD_PREFIX", True)
570
571 if not check_app_exists('${BUILD_PREFIX}g++', d):
572 missing = missing + "C++ Compiler (%sg++)," % d.getVar("BUILD_PREFIX", True)
573
574 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES', True)
575
576 for util in required_utilities.split():
577 if not check_app_exists(util, d):
578 missing = missing + "%s," % util
579
580 if missing:
581 missing = missing.rstrip(',')
582 status.addresult("Please install the following missing utilities: %s\n" % missing)
583
584 assume_provided = d.getVar('ASSUME_PROVIDED', True).split()
585 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
586 if "diffstat-native" not in assume_provided:
587 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
588
589 if "qemu-native" in assume_provided:
590 if not check_app_exists("qemu-arm", d):
591 status.addresult("qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH")
592
593 if "libsdl-native" in assume_provided:
594 if not check_app_exists("sdl-config", d):
595 status.addresult("libsdl-native is set to be ASSUME_PROVIDED but sdl-config can't be found in PATH. Please either install it, or configure qemu not to require sdl.")
596
597 (result, message) = check_gcc_march(d)
598 if result and message:
599 status.addresult("Your gcc version is older than 4.5, please add the following param to local.conf\n \
600 %s\n" % message)
601 if not result:
602 status.addresult("Your gcc version is older than 4.5 or is not working properly. Please verify you can build")
603 status.addresult(" and link something that uses atomic operations, such as: \n")
604 status.addresult(" __sync_bool_compare_and_swap (&atomic, 2, 3);\n")
605
606 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
607 tmpdir = d.getVar('TMPDIR', True)
608 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
609 tmpdirmode = os.stat(tmpdir).st_mode
610 if (tmpdirmode & stat.S_ISGID):
611 status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
612 if (tmpdirmode & stat.S_ISUID):
613 status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
614
615 # Some third-party software apparently relies on chmod etc. being suid root (!!)
617 suid_check_bins = "chown chmod mknod".split()
618 for bin_cmd in suid_check_bins:
619 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
620 if bin_path:
621 bin_stat = os.stat(bin_path)
622 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
623 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
624
625 # Check that we can fetch from various network transports
626 netcheck = check_connectivity(d)
627 status.addresult(netcheck)
628 if netcheck:
629 status.network_error = True
630
631 nolibs = d.getVar('NO32LIBS', True)
632 if not nolibs:
633 lib32path = '/lib'
634 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
635 lib32path = '/lib32'
636
637 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
638 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
639
640 bbpaths = d.getVar('BBPATH', True).split(":")
641 if ("." in bbpaths or "" in bbpaths) and not status.reparse:
642 status.addresult("BBPATH references the current directory, either through " \
643 "an empty entry, or a '.'.\n\t This is unsafe and means your "\
644 "layer configuration is adding empty elements to BBPATH.\n\t "\
645 "Please check your layer.conf files and other BBPATH " \
646 "settings to remove the current working directory " \
647 "references.\n" \
648                  "Parsed BBPATH is " + str(bbpaths))
649
650    oes_bb_conf = d.getVar('OES_BITBAKE_CONF', True)
651 if not oes_bb_conf:
652 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
653
654 # The length of TMPDIR can't be longer than 410
655 status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
656
657 # Check that TMPDIR isn't located on nfs
658 status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
659
660def check_sanity_everybuild(status, d):
661    # Sanity tests which test the user's environment and so need to run at each
662    # build (or are so cheap that it makes sense to always run them).
663
664    if os.getuid() == 0:
665 raise_sanity_error("Do not use Bitbake as root.", d)
666
667 # Check the Python version, we now have a minimum of Python 2.7.3
668 import sys
669 if sys.hexversion < 0x020703F0:
670 status.addresult('The system requires at least Python 2.7.3 to run. Please update your Python interpreter.\n')
671
672 # Check the bitbake version meets minimum requirements
673 from distutils.version import LooseVersion
674 minversion = d.getVar('BB_MIN_VERSION', True)
675 if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
676 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
677
678 sanity_check_conffiles(status, d)
679
680 paths = d.getVar('PATH', True).split(":")
681 if "." in paths or "" in paths:
682 status.addresult("PATH contains '.' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
683
684 # Check that the DISTRO is valid, if set
685 # need to take into account DISTRO renaming DISTRO
686 distro = d.getVar('DISTRO', True)
687 if distro and distro != "nodistro":
688 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
689 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO", True))
690
691 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
692 # set, since so much relies on it being set.
693 dldir = d.getVar('DL_DIR', True)
694 if not dldir:
695 status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
696 if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
697 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
698
699 # Check that the MACHINE is valid, if it is set
700 machinevalid = True
701 if d.getVar('MACHINE', True):
702 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
703 status.addresult('Please set a valid MACHINE in your local.conf or environment\n')
704 machinevalid = False
705 else:
706 status.addresult(check_sanity_validmachine(d))
707 else:
708 status.addresult('Please set a MACHINE in your local.conf or environment\n')
709 machinevalid = False
710 if machinevalid:
711 status.addresult(check_toolchain(d))
712
713 # Check that the SDKMACHINE is valid, if it is set
714 if d.getVar('SDKMACHINE', True):
715 if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
716 status.addresult('Specified SDKMACHINE value is not valid\n')
717 elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
718 status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
719
720 check_supported_distro(d)
721
722 # Check if DISPLAY is set if TEST_IMAGE is set
723 if d.getVar('TEST_IMAGE', True) == '1' or d.getVar('DEFAULT_TEST_SUITES', True):
724 testtarget = d.getVar('TEST_TARGET', True)
725 if testtarget == 'qemu' or testtarget == 'QemuTarget':
726 display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True)
727 if not display:
728 status.addresult('testimage needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n')
729
730 omask = os.umask(022)
731 if omask & 0755:
732 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
733 os.umask(omask)
734
735 if d.getVar('TARGET_ARCH', True) == "arm":
736 # This path is no longer user-readable in modern (very recent) Linux
737 try:
738 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
739 f = open("/proc/sys/vm/mmap_min_addr", "r")
740 try:
741 if (int(f.read().strip()) > 65536):
742 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
743 finally:
744 f.close()
745 except:
746 pass
747
748 oeroot = d.getVar('COREBASE', True)
749 if oeroot.find('+') != -1:
750 status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
751 if oeroot.find('@') != -1:
752 status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
753 if oeroot.find(' ') != -1:
754 status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
755
756 # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
757 import re
758 mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
759 protocols = ['http', 'ftp', 'file', 'https', \
760 'git', 'gitsm', 'hg', 'osc', 'p4', 'svk', 'svn', \
761 'bzr', 'cvs']
762 for mirror_var in mirror_vars:
763 mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n')
764 for mirror_entry in mirrors:
765 mirror_entry = mirror_entry.strip()
766 if not mirror_entry:
767 # ignore blank lines
768 continue
769
770 try:
771 pattern, mirror = mirror_entry.split()
772 except ValueError:
773 bb.warn('Invalid %s: %s, should be 2 members.' % (mirror_var, mirror_entry.strip()))
774 continue
775
776 decoded = bb.fetch2.decodeurl(pattern)
777 try:
778 pattern_scheme = re.compile(decoded[0])
779 except re.error as exc:
780 bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
781 continue
782
783 if not any(pattern_scheme.match(protocol) for protocol in protocols):
784 bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
785 continue
786
787 if not any(mirror.startswith(protocol + '://') for protocol in protocols):
788 bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
789 continue
790
791 if mirror.startswith('file://') and not mirror.startswith('file:///'):
792 bb.warn('Invalid file url in %s: %s, must be absolute path (file:///)' % (mirror_var, mirror_entry))
793
794 # Check that TMPDIR hasn't changed location since the last time we were run
795 tmpdir = d.getVar('TMPDIR', True)
796 checkfile = os.path.join(tmpdir, "saved_tmpdir")
797 if os.path.exists(checkfile):
798 with open(checkfile, "r") as f:
799 saved_tmpdir = f.read().strip()
800 if (saved_tmpdir != tmpdir):
801 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
802 else:
803 bb.utils.mkdirhier(tmpdir)
804 with open(checkfile, "w") as f:
805 f.write(tmpdir)
806
807def check_sanity(sanity_data):
808 import subprocess
809
810 class SanityStatus(object):
811 def __init__(self):
812 self.messages = ""
813 self.network_error = False
814 self.reparse = False
815
816 def addresult(self, message):
817 if message:
818 self.messages = self.messages + message
819
820 status = SanityStatus()
821
822 tmpdir = sanity_data.getVar('TMPDIR', True)
823 sstate_dir = sanity_data.getVar('SSTATE_DIR', True)
824
825 # Check saved sanity info
826 last_sanity_version = 0
827 last_tmpdir = ""
828 last_sstate_dir = ""
829 sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
830 if os.path.exists(sanityverfile):
831 with open(sanityverfile, 'r') as f:
832 for line in f:
833 if line.startswith('SANITY_VERSION'):
834 last_sanity_version = int(line.split()[1])
835 if line.startswith('TMPDIR'):
836 last_tmpdir = line.split()[1]
837 if line.startswith('SSTATE_DIR'):
838 last_sstate_dir = line.split()[1]
839
840 check_sanity_everybuild(status, sanity_data)
841
842 sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
843 network_error = False
844 if last_sanity_version < sanity_version:
845 check_sanity_version_change(status, sanity_data)
846 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
847 else:
848 if last_sstate_dir != sstate_dir:
849 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
850
851 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
852 with open(sanityverfile, 'w') as f:
853 f.write("SANITY_VERSION %s\n" % sanity_version)
854 f.write("TMPDIR %s\n" % tmpdir)
855 f.write("SSTATE_DIR %s\n" % sstate_dir)
856
857 sanity_handle_abichanges(status, sanity_data)
858
859 if status.messages != "":
860 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
861 return status.reparse
862
863# Create a copy of the datastore and finalise it to ensure appends and
864# overrides are set - the datastore has yet to be finalised at ConfigParsed
865def copy_data(e):
866 sanity_data = bb.data.createCopy(e.data)
867 sanity_data.finalize()
868 return sanity_data
869
870addhandler check_sanity_eventhandler
871check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
872python check_sanity_eventhandler() {
873 if bb.event.getName(e) == "SanityCheck":
874 sanity_data = copy_data(e)
875 if e.generateevents:
876 sanity_data.setVar("SANITY_USE_EVENTS", "1")
877 reparse = check_sanity(sanity_data)
878 e.data.setVar("BB_INVALIDCONF", reparse)
879 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
880 elif bb.event.getName(e) == "NetworkTest":
881 sanity_data = copy_data(e)
882 if e.generateevents:
883 sanity_data.setVar("SANITY_USE_EVENTS", "1")
884 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
885
886 return
887}
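Several of these checks are driven purely by configuration: the connectivity
test only runs when CONNECTIVITY_CHECK_URIS is set, and the mirror checker
expects each entry to consist of two whitespace-separated fields, a scheme
regex and a replacement URI. A local.conf fragment exercising both might look
like this (URLs illustrative):

    CONNECTIVITY_CHECK_URIS ?= "https://www.example.com/"

    PREMIRRORS_prepend = "\
        git://.*/.*  http://mirror.example.com/sources/ \n \
        http://.*/.* http://mirror.example.com/sources/ \n"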
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
new file mode 100644
index 0000000000..fc0f26b17b
--- /dev/null
+++ b/meta/classes/scons.bbclass
@@ -0,0 +1,15 @@
1DEPENDS += "python-scons-native"
2
3EXTRA_OESCONS ?= ""
4
5scons_do_compile() {
6 ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
7 bbfatal "scons build execution failed."
8}
9
10scons_do_install() {
11    ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS} || \
12 bbfatal "scons install execution failed."
13}
14
15EXPORT_FUNCTIONS do_compile do_install
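A recipe using this class only needs to inherit it and, where required, pass
extra arguments through EXTRA_OESCONS; a minimal hypothetical recipe fragment:

    DESCRIPTION = "Example SCons-built package"
    LICENSE = "MIT"

    inherit scons

    EXTRA_OESCONS = "STATIC=1"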
diff --git a/meta/classes/sdl.bbclass b/meta/classes/sdl.bbclass
new file mode 100644
index 0000000000..cc31288f61
--- /dev/null
+++ b/meta/classes/sdl.bbclass
@@ -0,0 +1,6 @@
1#
2# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
3#
4
5DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
6SECTION = "x11/games"
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
new file mode 100644
index 0000000000..56343b1c73
--- /dev/null
+++ b/meta/classes/setuptools.bbclass
@@ -0,0 +1,8 @@
1inherit distutils
2
3DEPENDS += "python-distribute-native"
4
5DISTUTILS_INSTALL_ARGS = "--root=${D} \
6 --prefix=${prefix} \
7 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
8 --install-data=${datadir}"
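Usage mirrors the distutils class this builds on: a setuptools-based Python
module simply inherits the class, e.g. (hypothetical recipe fragment):

    DESCRIPTION = "Example setuptools-packaged Python module"
    LICENSE = "MIT"

    inherit setuptools

The setuptools3 variant below is identical in shape for Python 3 modules.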
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
new file mode 100644
index 0000000000..40c18c8976
--- /dev/null
+++ b/meta/classes/setuptools3.bbclass
@@ -0,0 +1,8 @@
1inherit distutils3
2
3DEPENDS += "python3-distribute-native"
4
5DISTUTILS_INSTALL_ARGS = "--root=${D} \
6 --prefix=${prefix} \
7 --install-lib=${PYTHON_SITEPACKAGES_DIR} \
8 --install-data=${datadir}"
diff --git a/meta/classes/sip.bbclass b/meta/classes/sip.bbclass
new file mode 100644
index 0000000000..6ed2a13bda
--- /dev/null
+++ b/meta/classes/sip.bbclass
@@ -0,0 +1,61 @@
1# Build Class for Sip based Python Bindings
2# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
3#
4STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip"
5
6DEPENDS =+ "sip-native"
7RDEPENDS_${PN} += "python-sip"
8
9# default stuff, do not uncomment
10# EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"
11
12# do_generate runs before do_configure, so ensure that sip-native is populated in the sysroot before executing it
13do_generate[depends] += "sip-native:do_populate_sysroot"
14
15sip_do_generate() {
16 if [ -z "${SIP_MODULES}" ]; then
17 MODULES="`ls sip/*mod.sip`"
18 else
19 MODULES="${SIP_MODULES}"
20 fi
21
22 if [ -z "$MODULES" ]; then
23 die "SIP_MODULES not set and no modules found in $PWD"
24 else
25 bbnote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'"
26 fi
27
28 if [ -z "${EXTRA_SIPTAGS}" ]; then
29 die "EXTRA_SIPTAGS needs to be set!"
30 else
31 SIPTAGS="${EXTRA_SIPTAGS}"
32 fi
33
34 if [ ! -z "${SIP_FEATURES}" ]; then
35 FEATURES="-z ${SIP_FEATURES}"
36 bbnote "sip feature file: ${SIP_FEATURES}"
37 fi
38
39 for module in $MODULES
40 do
41 install -d ${module}/
42        echo "calling 'sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf sip/${module}/${module}mod.sip'"
43 sip4 -I ${STAGING_SIPDIR} -I sip ${SIPTAGS} ${FEATURES} -c ${module} -b ${module}/${module}.sbf \
44 sip/${module}/${module}mod.sip || die "Error calling sip on ${module}"
45 sed -e 's,target,TARGET,' -e 's,sources,SOURCES,' -e 's,headers,HEADERS,' \
46 ${module}/${module}.sbf | sed s,"moc_HEADERS =","HEADERS +=", \
47 >${module}/${module}.pro
48 echo "TEMPLATE=lib" >>${module}/${module}.pro
49 [ "${module}" = "qt" ] && echo "" >>${module}/${module}.pro
50 [ "${module}" = "qtcanvas" ] && echo "" >>${module}/${module}.pro
51 [ "${module}" = "qttable" ] && echo "" >>${module}/${module}.pro
52 [ "${module}" = "qwt" ] && echo "" >>${module}/${module}.pro
53 [ "${module}" = "qtpe" ] && echo "" >>${module}/${module}.pro
54 [ "${module}" = "qtpe" ] && echo "LIBS+=-lqpe" >>${module}/${module}.pro
55 true
56 done
57}
58
59EXPORT_FUNCTIONS do_generate
60
61addtask generate after do_unpack do_patch before do_configure
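A consumer of this class must set EXTRA_SIPTAGS (do_generate dies otherwise)
and normally SIP_MODULES as well; an illustrative recipe fragment, reusing the
tag example from the class's own comment:

    inherit sip

    SIP_MODULES   = "qt"
    EXTRA_SIPTAGS = "-tWS_X11 -tQt_4_3_0"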
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
new file mode 100644
index 0000000000..45dce489de
--- /dev/null
+++ b/meta/classes/siteconfig.bbclass
@@ -0,0 +1,33 @@
1python siteconfig_do_siteconfig () {
2 shared_state = sstate_state_fromvars(d)
3 if shared_state['task'] != 'populate_sysroot':
4 return
5 if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
6 bb.debug(1, "No site_config directory, skipping do_siteconfig")
7 return
8 bb.build.exec_func('do_siteconfig_gencache', d)
9 sstate_clean(shared_state, d)
10 sstate_install(shared_state, d)
11}
12
13EXTRASITECONFIG ?= ""
14
15siteconfig_do_siteconfig_gencache () {
16 mkdir -p ${WORKDIR}/site_config_${MACHINE}
17 gen-site-config ${FILE_DIRNAME}/site_config \
18 >${WORKDIR}/site_config_${MACHINE}/configure.ac
19 cd ${WORKDIR}/site_config_${MACHINE}
20 autoconf
21 rm -f ${BPN}_cache
22 CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${BPN}_cache
23 sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
24 -e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
25 < ${BPN}_cache > ${BPN}_config
26 mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
27 cp ${BPN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
28
29}
30
31do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
32
33EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
new file mode 100644
index 0000000000..e90632aeef
--- /dev/null
+++ b/meta/classes/siteinfo.bbclass
@@ -0,0 +1,164 @@
1# This class exists to provide information about the targets that
2# may be needed by other classes and/or recipes. If you add a new
3# target this will probably need to be updated.
4
5#
6# siteinfo_data(d) returns a list of site information strings for the
7# current target ("<arch>-<os>"), built up from three tables:
8#
9# * archinfo: endianness ("endian-little"/"endian-big") and bit size
10# ("bit-32"/"bit-64"), keyed on HOST_ARCH
11# * osinfo: OS family and C library (e.g. "common-linux common-glibc"),
12# keyed on HOST_OS
13# * targetinfo: per-target additions and overrides, keyed on the full
14# "<arch>-<os>" pair
15#
16# The target name itself and "common" are always appended. The anonymous
17# python below fails the build if endianness or bit size cannot be determined.
18def siteinfo_data(d):
19 archinfo = {
20 "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
21 "aarch64": "endian-little bit-64 arm-common",
22 "aarch64_be": "endian-big bit-64 arm-common",
23 "arm": "endian-little bit-32 arm-common",
24 "armeb": "endian-big bit-32 arm-common",
25 "avr32": "endian-big bit-32 avr32-common",
26 "bfin": "endian-little bit-32 bfin-common",
27 "i386": "endian-little bit-32 ix86-common",
28 "i486": "endian-little bit-32 ix86-common",
29 "i586": "endian-little bit-32 ix86-common",
30 "i686": "endian-little bit-32 ix86-common",
31 "ia64": "endian-little bit-64",
32 "microblaze": "endian-big bit-32 microblaze-common",
33 "microblazeel": "endian-little bit-32 microblaze-common",
34 "mips": "endian-big bit-32 mips-common",
35 "mips64": "endian-big bit-64 mips-common",
36 "mips64el": "endian-little bit-64 mips-common",
37 "mipsel": "endian-little bit-32 mips-common",
38 "powerpc": "endian-big bit-32 powerpc-common",
39 "nios2": "endian-little bit-32 nios2-common",
40 "powerpc64": "endian-big bit-64 powerpc-common",
41 "ppc": "endian-big bit-32 powerpc-common",
42 "ppc64": "endian-big bit-64 powerpc-common",
43 "sh3": "endian-little bit-32 sh-common",
44 "sh4": "endian-little bit-32 sh-common",
45 "sparc": "endian-big bit-32",
46 "viac3": "endian-little bit-32 ix86-common",
47 "x86_64": "endian-little", # bitinfo specified in targetinfo
48 }
49 osinfo = {
50 "darwin": "common-darwin",
51 "darwin9": "common-darwin",
52 "linux": "common-linux common-glibc",
53 "linux-gnu": "common-linux common-glibc",
54 "linux-gnux32": "common-linux common-glibc",
55 "linux-gnun32": "common-linux common-glibc",
56 "linux-gnueabi": "common-linux common-glibc",
57 "linux-gnuspe": "common-linux common-glibc",
58 "linux-uclibc": "common-linux common-uclibc",
59 "linux-uclibceabi": "common-linux common-uclibc",
60 "linux-uclibcspe": "common-linux common-uclibc",
61 "linux-musl": "common-linux common-musl",
62 "linux-musleabi": "common-linux common-musl",
63 "linux-muslspe": "common-linux common-musl",
64 "uclinux-uclibc": "common-uclibc",
65 "cygwin": "common-cygwin",
66 "mingw32": "common-mingw",
67 }
68 targetinfo = {
69 "aarch64-linux-gnu": "aarch64-linux",
70 "aarch64_be-linux-gnu": "aarch64_be-linux",
71 "arm-linux-gnueabi": "arm-linux",
72 "arm-linux-musleabi": "arm-linux",
73 "arm-linux-uclibceabi": "arm-linux-uclibc",
74 "armeb-linux-gnueabi": "armeb-linux",
75 "armeb-linux-uclibceabi": "armeb-linux-uclibc",
76 "armeb-linux-musleabi": "armeb-linux",
77 "mips-linux-musl": "mips-linux",
78 "mipsel-linux-musl": "mipsel-linux",
79 "mips64-linux-musl": "mips-linux",
80 "mips64el-linux-musl": "mipsel-linux",
81 "mips64-linux-gnun32": "mips-linux bit-32",
82 "mips64el-linux-gnun32": "mipsel-linux bit-32",
83 "powerpc-linux": "powerpc32-linux",
84 "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
85 "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
86 "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
87 "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
88 "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
89 "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
90 "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
91 "powerpc64-linux": "powerpc-linux",
92 "x86_64-cygwin": "bit-64",
93 "x86_64-darwin": "bit-64",
94 "x86_64-darwin9": "bit-64",
95 "x86_64-linux": "bit-64",
96 "x86_64-linux-musl": "x86_64-linux bit-64",
97 "x86_64-linux-uclibc": "bit-64",
98 "x86_64-linux-gnu": "bit-64 x86_64-linux",
99 "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
100 "x86_64-mingw32": "bit-64",
101 }
102
103 hostarch = d.getVar("HOST_ARCH", True)
104 hostos = d.getVar("HOST_OS", True)
105 target = "%s-%s" % (hostarch, hostos)
106
107 sitedata = []
108 if hostarch in archinfo:
109 sitedata.extend(archinfo[hostarch].split())
110 if hostos in osinfo:
111 sitedata.extend(osinfo[hostos].split())
112 if target in targetinfo:
113 sitedata.extend(targetinfo[target].split())
114 sitedata.append(target)
115 sitedata.append("common")
116
117 bb.debug(1, "SITE files %s" % sitedata);
118 return sitedata
119
120python () {
121 sitedata = set(siteinfo_data(d))
122 if "endian-little" in sitedata:
123 d.setVar("SITEINFO_ENDIANNESS", "le")
124 elif "endian-big" in sitedata:
125 d.setVar("SITEINFO_ENDIANNESS", "be")
126 else:
127 bb.error("Unable to determine endianness for architecture '%s'" %
128 d.getVar("HOST_ARCH", True))
129 bb.fatal("Please add your architecture to siteinfo.bbclass")
130
131 if "bit-32" in sitedata:
132 d.setVar("SITEINFO_BITS", "32")
133 elif "bit-64" in sitedata:
134 d.setVar("SITEINFO_BITS", "64")
135 else:
136 bb.error("Unable to determine bit size for architecture '%s'" %
137 d.getVar("HOST_ARCH", True))
138 bb.fatal("Please add your architecture to siteinfo.bbclass")
139}
140
141def siteinfo_get_files(d, no_cache = False):
142 sitedata = siteinfo_data(d)
143 sitefiles = ""
144 for path in d.getVar("BBPATH", True).split(":"):
145 for element in sitedata:
146 filename = os.path.join(path, "site", element)
147 if os.path.exists(filename):
148 sitefiles += filename + " "
149
150 if no_cache: return sitefiles
151
152 # Now check for siteconfig cache files
153 path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True)
154 if os.path.isdir(path_siteconfig):
155 for i in os.listdir(path_siteconfig):
156 filename = os.path.join(path_siteconfig, i)
157 sitefiles += filename + " "
158
159 return sitefiles
160
161#
162# Make some information available via variables
163#
164SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
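(For illustration: a minimal, hypothetical recipe fragment consuming the variables exported above.)

    inherit siteinfo

    do_configure_prepend() {
        bbnote "target is ${SITEINFO_ENDIANNESS}-endian, ${SITEINFO_BITS}-bit"
    }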
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
new file mode 100644
index 0000000000..bccc230d8c
--- /dev/null
+++ b/meta/classes/spdx.bbclass
@@ -0,0 +1,325 @@
1# This class integrates real-time license scanning, generation of SPDX standard
2# output and verifying license info during the building process.
3# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
4#
5# For more information on FOSSology:
6# http://www.fossology.org
7#
8# For more information on FOSSologySPDX commandline:
9# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
10#
11# For more information on SPDX:
12# http://www.spdx.org
13#
14
15# The SPDX file will be output to the path defined as [SPDX_MANIFEST_DIR]
16# in ./meta/conf/licenses.conf.
17
18SPDXOUTPUTDIR = "${WORKDIR}/spdx_output_dir"
19SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
20
21# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
22# the real top-level directory.
23SPDX_S ?= "${S}"
24
25python do_spdx () {
26 import os, sys
27 import json
28
29 info = {}
30 info['workdir'] = (d.getVar('WORKDIR', True) or "")
31 info['sourcedir'] = (d.getVar('SPDX_S', True) or "")
32 info['pn'] = (d.getVar( 'PN', True ) or "")
33 info['pv'] = (d.getVar( 'PV', True ) or "")
34 info['src_uri'] = (d.getVar( 'SRC_URI', True ) or "")
35 info['spdx_version'] = (d.getVar('SPDX_VERSION', True) or '')
36 info['data_license'] = (d.getVar('DATA_LICENSE', True) or '')
37
38 spdx_sstate_dir = (d.getVar('SPDXSSTATEDIR', True) or "")
39 manifest_dir = (d.getVar('SPDX_MANIFEST_DIR', True) or "")
40 info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
41 sstatefile = os.path.join(spdx_sstate_dir,
42 info['pn'] + info['pv'] + ".spdx" )
43 info['spdx_temp_dir'] = (d.getVar('SPDX_TEMP_DIR', True) or "")
44 info['tar_file'] = os.path.join( info['workdir'], info['pn'] + ".tar.gz" )
45
46
47 ## get everything from cache. use it to decide if
48 ## something needs to be rerun
49 cur_ver_code = get_ver_code( info['sourcedir'] )
50 cache_cur = False
51 if not os.path.exists( spdx_sstate_dir ):
52 bb.utils.mkdirhier( spdx_sstate_dir )
53 if not os.path.exists( info['spdx_temp_dir'] ):
54 bb.utils.mkdirhier( info['spdx_temp_dir'] )
55 if os.path.exists( sstatefile ):
56 ## cache for this package exists. read it in
57 cached_spdx = get_cached_spdx( sstatefile )
58
59 if cached_spdx['PackageVerificationCode'] == cur_ver_code:
60 bb.warn(info['pn'] + ": verification code matches the cache, nothing to do")
61 cache_cur = True
62 else:
63 local_file_info = setup_foss_scan( info,
64 True, cached_spdx['Files'] )
65 else:
66 local_file_info = setup_foss_scan( info, False, None )
67
68 if cache_cur:
69 spdx_file_info = cached_spdx['Files']
70 else:
71 ## setup fossology command
72 foss_server = (d.getVar('FOSS_SERVER', True) or "")
73 foss_flags = (d.getVar('FOSS_WGET_FLAGS', True) or "")
74 foss_command = "wget %s --post-file=%s %s"\
75 % (foss_flags,info['tar_file'],foss_server)
76
77 #bb.warn(info['pn'] + json.dumps(local_file_info))
78 foss_file_info = run_fossology( foss_command )
79 spdx_file_info = create_spdx_doc( local_file_info, foss_file_info )
80 ## write to cache
81 write_cached_spdx(sstatefile,cur_ver_code,spdx_file_info)
82
83 ## Get document and package level information
84 spdx_header_info = get_header_info(info, cur_ver_code, spdx_file_info)
85
86 ## CREATE MANIFEST
87 create_manifest(info,spdx_header_info,spdx_file_info)
88
89 ## clean up the temp stuff
90 remove_dir_tree( info['spdx_temp_dir'] )
91 if os.path.exists(info['tar_file']):
92 remove_file( info['tar_file'] )
93}
94addtask spdx after do_patch before do_configure
95
96def create_manifest(info,header,files):
97 with open(info['outfile'], 'w') as f:
98 f.write(header + '\n')
99 for chksum, block in files.iteritems():
100 for key, value in block.iteritems():
101 f.write(key + ": " + value)
102 f.write('\n')
103 f.write('\n')
104
105def get_cached_spdx( sstatefile ):
106 import json
107 cached_spdx_info = {}
108 with open( sstatefile, 'r' ) as f:
109 try:
110 cached_spdx_info = json.load(f)
111 except ValueError as e:
112 cached_spdx_info = None
113 return cached_spdx_info
114
115def write_cached_spdx( sstatefile, ver_code, files ):
116 import json
117 spdx_doc = {}
118 spdx_doc['PackageVerificationCode'] = ver_code
119 spdx_doc['Files'] = {}
120 spdx_doc['Files'] = files
121 with open( sstatefile, 'w' ) as f:
122 f.write(json.dumps(spdx_doc))
123
124def setup_foss_scan( info, cache, cached_files ):
125 import errno, shutil, time
126 import tarfile
127 file_info = {}
128 cache_dict = {}
129
130 for f_dir, f in list_files( info['sourcedir'] ):
131 full_path = os.path.join( f_dir, f )
132 abs_path = os.path.join(info['sourcedir'], full_path)
133 dest_dir = os.path.join( info['spdx_temp_dir'], f_dir )
134 dest_path = os.path.join( info['spdx_temp_dir'], full_path )
135 try:
136 stats = os.stat(abs_path)
137 except OSError as e:
138 bb.warn( "Stat failed" + str(e) + "\n")
139 continue
140
141 checksum = hash_file( abs_path )
142 mtime = time.asctime(time.localtime(stats.st_mtime))
143
144 ## retain cache information if it exists
145 file_info[checksum] = {}
146 if cache and checksum in cached_files:
147 file_info[checksum] = cached_files[checksum]
148 else:
149 file_info[checksum]['FileName'] = full_path
150
151 try:
152 os.makedirs( dest_dir )
153 except OSError as e:
154 if e.errno == errno.EEXIST and os.path.isdir(dest_dir):
155 pass
156 else:
157 bb.warn( "mkdir failed " + str(e) + "\n" )
158 continue
159
160 if (cache and checksum not in cached_files) or not cache:
161 try:
162 shutil.copyfile( abs_path, dest_path )
163 except shutil.Error as e:
164 bb.warn( str(e) + "\n" )
165 except IOError as e:
166 bb.warn( str(e) + "\n" )
167
168 with tarfile.open( info['tar_file'], "w:gz" ) as tar:
169 tar.add( info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']) )
170 tar.close()
171
172 return file_info
173
174
175def remove_dir_tree( dir_name ):
176 import shutil
177 try:
178 shutil.rmtree( dir_name )
179 except:
180 pass
181
182def remove_file( file_name ):
183 try:
184 os.remove( file_name )
185 except OSError as e:
186 pass
187
188def list_files( dir ):
189 for root, subFolders, files in os.walk( dir ):
190 for f in files:
191 rel_root = os.path.relpath( root, dir )
192 yield rel_root, f
193 return
194
195def hash_file( file_name ):
196 # read via a context manager so the file handle is always
197 # closed, even when open() or read() fails
198 try:
199 with open( file_name, 'rb' ) as f:
200 data_string = f.read()
201 except (IOError, OSError):
202 return None
203 sha1 = hash_string( data_string )
204 return sha1
205
206def hash_string( data ):
207 import hashlib
208 sha1 = hashlib.sha1()
209 sha1.update( data )
210 return sha1.hexdigest()
211
212def run_fossology( foss_command ):
213 import string, re
214 import subprocess
215
216 p = subprocess.Popen(foss_command.split(),
217 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
218 foss_output, foss_error = p.communicate()
219
220 records = []
221 records = re.findall('FileName:.*?</text>', foss_output, re.S)
222
223 file_info = {}
224 for rec in records:
225 rec = string.replace( rec, '\r', '' )
226 chksum = re.findall( 'FileChecksum: SHA1: (.*)\n', rec)[0]
227 file_info[chksum] = {}
228 file_info[chksum]['FileCopyrightText'] = re.findall( 'FileCopyrightText: '
229 + '(.*?</text>)', rec, re.S )[0]
230 fields = ['FileType','LicenseConcluded',
231 'LicenseInfoInFile','FileName']
232 for field in fields:
233 file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
234
235 return file_info
236
237def create_spdx_doc( file_info, scanned_files ):
238 import json
239 ## push foss changes back into cache
240 for chksum, lic_info in scanned_files.iteritems():
241 if chksum in file_info:
242 ## keep the locally scanned FileName rather than the server's
243 file_info[chksum]['FileType'] = lic_info['FileType']
244 file_info[chksum]['FileChecksum: SHA1'] = chksum
245 file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
246 file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
247 file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
248 else:
249 bb.warn(lic_info['FileName'] + " : " + chksum
250 + " : is not in the local file info: "
251 + json.dumps(lic_info,indent=1))
252 return file_info
253
254def get_ver_code( dirname ):
255 chksums = []
256 for f_dir, f in list_files( dirname ):
257 try:
258 stats = os.stat(os.path.join(dirname,f_dir,f))
259 except OSError as e:
260 bb.warn( "Stat failed" + str(e) + "\n")
261 continue
262 chksums.append(hash_file(os.path.join(dirname,f_dir,f)))
263 ver_code_string = ''.join( chksums ).lower()
264 ver_code = hash_string( ver_code_string )
265 return ver_code
266
267def get_header_info( info, spdx_verification_code, spdx_files ):
268 """
269 Put together the header SPDX information.
270 Eventually this needs to become a lot less
271 of a hardcoded thing.
272 """
273 from datetime import datetime
274 import os
275 head = []
276 DEFAULT = "NOASSERTION"
277
278 #spdx_verification_code = get_ver_code( info['sourcedir'] )
279 package_checksum = ''
280 if os.path.exists(info['tar_file']):
281 package_checksum = hash_file( info['tar_file'] )
282 else:
283 package_checksum = DEFAULT
284
285 ## document level information
286 head.append("SPDXVersion: " + info['spdx_version'])
287 head.append("DataLicense: " + info['data_license'])
288 head.append("DocumentComment: <text>SPDX for "
289 + info['pn'] + " version " + info['pv'] + "</text>")
290 head.append("")
291
292 ## Creator information
293 now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
294 head.append("## Creation Information")
295 head.append("Creator: fossology-spdx")
296 head.append("Created: " + now)
297 head.append("CreatorComment: <text>UNO</text>")
298 head.append("")
299
300 ## package level information
301 head.append("## Package Information")
302 head.append("PackageName: " + info['pn'])
303 head.append("PackageVersion: " + info['pv'])
304 head.append("PackageDownloadLocation: " + DEFAULT)
305 head.append("PackageSummary: <text></text>")
306 head.append("PackageFileName: " + os.path.basename(info['tar_file']))
307 head.append("PackageSupplier: Person:" + DEFAULT)
308 head.append("PackageOriginator: Person:" + DEFAULT)
309 head.append("PackageChecksum: SHA1: " + package_checksum)
310 head.append("PackageVerificationCode: " + spdx_verification_code)
311 head.append("PackageDescription: <text>" + info['pn']
312 + " version " + info['pv'] + "</text>")
313 head.append("")
314 head.append("PackageCopyrightText: <text>" + DEFAULT + "</text>")
315 head.append("")
316 head.append("PackageLicenseDeclared: " + DEFAULT)
317 head.append("PackageLicenseConcluded: " + DEFAULT)
318 head.append("PackageLicenseInfoFromFiles: " + DEFAULT)
319 head.append("")
320
321 ## header for file level
322 head.append("## File Information")
323 head.append("")
324
325 return '\n'.join(head)
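(For illustration: a hypothetical local.conf sketch enabling the class. The FOSS_SERVER URL is a placeholder; the real values for it, SPDX_MANIFEST_DIR and SPDX_TEMP_DIR are expected to come from meta/conf/licenses.conf.)

    INHERIT += "spdx"
    # placeholder FOSSologySPDX web API endpoint
    FOSS_SERVER = "http://localhost/?mod=spdx_license_once"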
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
new file mode 100644
index 0000000000..ace6bdb57a
--- /dev/null
+++ b/meta/classes/sstate.bbclass
@@ -0,0 +1,837 @@
1SSTATE_VERSION = "3"
2
3SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
4SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
5
6def generate_sstatefn(spec, hash, d):
7 if not hash:
8 hash = "INVALID"
9 return hash[:2] + "/" + spec + hash
10
11SSTATE_PKGARCH = "${PACKAGE_ARCH}"
12SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
13SSTATE_SWSPEC = "sstate:${BPN}::${PV}:${PR}::${SSTATE_VERSION}:"
14SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
15SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
16SSTATE_EXTRAPATH = ""
17SSTATE_EXTRAPATHWILDCARD = ""
18SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
19
20# We don't want the sstate to depend on things like the distro string
21# of the system; we let the sstate paths take care of this.
22SSTATE_EXTRAPATH[vardepvalue] = ""
23
24# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
25SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
26# Avoid docbook/sgml catalog warnings for now
27SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
28
29SSTATE_SCAN_FILES ?= "*.la *-config *_config"
30SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
31
32BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
33
34SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
35
36SSTATECREATEFUNCS = "sstate_hardcode_path"
37SSTATEPOSTCREATEFUNCS = ""
38SSTATEPREINSTFUNCS = ""
39SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
40SSTATEPOSTINSTFUNCS = ""
41EXTRA_STAGING_FIXMES ?= ""
42
43SIGGEN_LOCKEDSIGS_CHECK_LEVEL ?= 'error'
44
45# Specify the dirs in which the shell functions are executed, and don't use
46# ${B} as the default dir, to avoid possible races over ${B} with other tasks.
47sstate_create_package[dirs] = "${SSTATE_BUILDDIR}"
48sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}"
49
50# Do not run sstate_hardcode_path() in ${B}:
51# ${B} may be removed by cmake_do_configure() while
52# sstate_hardcode_path() is running.
53sstate_hardcode_path[dirs] = "${SSTATE_BUILDDIR}"
54
55python () {
56 if bb.data.inherits_class('native', d):
57 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH'))
58 elif bb.data.inherits_class('crosssdk', d):
59 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
60 elif bb.data.inherits_class('cross', d):
61 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
62 elif bb.data.inherits_class('nativesdk', d):
63 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
64 elif bb.data.inherits_class('cross-canadian', d):
65 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
66 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
67 d.setVar('SSTATE_PKGARCH', "allarch")
68 else:
69 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
70
71 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
72 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
73 d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
74
75 # These classes encode staging paths into their scripts data so they can
76 # only be reused if we manipulate the paths
77 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
78 scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
79 d.setVar('SSTATE_SCAN_CMD', scan_cmd)
80
81 unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
82 d.setVar('SSTATETASKS', " ".join(unique_tasks))
83 for task in unique_tasks:
84 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
85 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
86}
87
88def sstate_init(task, d):
89 ss = {}
90 ss['task'] = task
91 ss['dirs'] = []
92 ss['plaindirs'] = []
93 ss['lockfiles'] = []
94 ss['lockfiles-shared'] = []
95 return ss
96
97def sstate_state_fromvars(d, task = None):
98 if task is None:
99 task = d.getVar('BB_CURRENTTASK', True)
100 if not task:
101 bb.fatal("sstate code running without task context?!")
102 task = task.replace("_setscene", "")
103
104 if task.startswith("do_"):
105 task = task[3:]
106 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
107 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
108 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
109 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
110 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
111 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
112 if not task or len(inputs) != len(outputs):
113 bb.fatal("sstate variables not setup correctly?!")
114
115 if task == "populate_lic":
116 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
117 d.setVar("SSTATE_EXTRAPATH", "")
118
119 ss = sstate_init(task, d)
120 for i in range(len(inputs)):
121 sstate_add(ss, inputs[i], outputs[i], d)
122 ss['lockfiles'] = lockfiles
123 ss['lockfiles-shared'] = lockfilesshared
124 ss['plaindirs'] = plaindirs
125 ss['interceptfuncs'] = interceptfuncs
126 return ss
127
128def sstate_add(ss, source, dest, d):
129 if not source.endswith("/"):
130 source = source + "/"
131 if not dest.endswith("/"):
132 dest = dest + "/"
133 source = os.path.normpath(source)
134 dest = os.path.normpath(dest)
135 srcbase = os.path.basename(source)
136 ss['dirs'].append([srcbase, source, dest])
137 return ss
138
139def sstate_install(ss, d):
140 import oe.path
141 import subprocess
142
143 sharedfiles = []
144 shareddirs = []
145 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
146
147 d2 = d.createCopy()
148 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
149 if extrainf:
150 d2.setVar("SSTATE_MANMACH", extrainf)
151 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
152
153 if os.access(manifest, os.R_OK):
154 bb.fatal("Package already staged (%s)?!" % manifest)
155
156 locks = []
157 for lock in ss['lockfiles-shared']:
158 locks.append(bb.utils.lockfile(lock, True))
159 for lock in ss['lockfiles']:
160 locks.append(bb.utils.lockfile(lock))
161
162 for state in ss['dirs']:
163 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
164 for walkroot, dirs, files in os.walk(state[1]):
165 for file in files:
166 srcpath = os.path.join(walkroot, file)
167 dstpath = srcpath.replace(state[1], state[2])
168 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
169 sharedfiles.append(dstpath)
170 for dir in dirs:
171 srcdir = os.path.join(walkroot, dir)
172 dstdir = srcdir.replace(state[1], state[2])
173 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
174 if not dstdir.endswith("/"):
175 dstdir = dstdir + "/"
176 shareddirs.append(dstdir)
177
178 # Check the file list for conflicts against files which already exist
179 whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
180 match = []
181 for f in sharedfiles:
182 if os.path.exists(f):
183 f = os.path.normpath(f)
184 realmatch = True
185 for w in whitelist:
186 if f.startswith(w):
187 realmatch = False
188 break
189 if realmatch:
190 match.append(f)
191 sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
192 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
193 if search_output != "":
194 match.append("Matched in %s" % search_output.rstrip())
195 if match:
196 bb.error("The recipe %s is trying to install files into a shared " \
197 "area when those files already exist. Those files and their manifest " \
198 "location are:\n %s\nPlease verify which recipe should provide the " \
199 "above files.\nThe build has stopped as continuing in this scenario WILL " \
200 "break things, if not now, possibly in the future (we've seen builds fail " \
201 "several months later). If the system knew how to recover from this " \
202 "automatically it would however there are several different scenarios " \
203 "which can result in this and we don't know which one this is. It may be " \
204 "you have switched providers of something like virtual/kernel (e.g. from " \
205 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
206 "clean task for both recipes and it will resolve this error. It may be " \
207 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
208 "those recipes should again resolve this error however switching " \
209 "DISTRO_FEATURES on an existing build directory is not supported, you " \
210 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
211 "It could be the overlapping files detected are harmless in which case " \
212 "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
213 "also be your build is including two different conflicting versions of " \
214 "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
215 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
216 "sharing the error and filelist above." % \
217 (d.getVar('PN', True), "\n ".join(match)))
218 bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
219
220 # Write out the manifest
221 f = open(manifest, "w")
222 for file in sharedfiles:
223 f.write(file + "\n")
224
225 # We want to ensure that directories appear at the end of the manifest
226 # so that when we test to see if they should be deleted any contents
227 # added by the task will have been removed first.
228 dirs = sorted(shareddirs, key=len)
229 # Must remove children first, which will have a longer path than the parent
230 for di in reversed(dirs):
231 f.write(di + "\n")
232 f.close()
233
234 # Run the actual file install
235 for state in ss['dirs']:
236 if os.path.exists(state[1]):
237 oe.path.copyhardlinktree(state[1], state[2])
238
239 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
240 bb.build.exec_func(postinst, d)
241
242 for lock in locks:
243 bb.utils.unlockfile(lock)
244
245sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
246sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
247
248def sstate_installpkg(ss, d):
249 import oe.path
250 import subprocess
251
252 def prepdir(dir):
253 # remove dir if it exists, ensure any parent directories do exist
254 if os.path.exists(dir):
255 oe.path.remove(dir)
256 bb.utils.mkdirhier(dir)
257 oe.path.remove(dir)
258
259 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
260 sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
261 sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
262
263 if not os.path.exists(sstatepkg):
264 pstaging_fetch(sstatefetch, sstatepkg, d)
265
266 if not os.path.isfile(sstatepkg):
267 bb.note("Staging package %s does not exist" % sstatepkg)
268 return False
269
270 sstate_clean(ss, d)
271
272 d.setVar('SSTATE_INSTDIR', sstateinst)
273 d.setVar('SSTATE_PKG', sstatepkg)
274
275 for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
276 bb.build.exec_func(f, d)
277
278 for state in ss['dirs']:
279 prepdir(state[1])
280 os.rename(sstateinst + state[0], state[1])
281 sstate_install(ss, d)
282
283 for plain in ss['plaindirs']:
284 workdir = d.getVar('WORKDIR', True)
285 src = sstateinst + "/" + plain.replace(workdir, '')
286 dest = plain
287 bb.utils.mkdirhier(src)
288 prepdir(dest)
289 os.rename(src, dest)
290
291 return True
292
293python sstate_hardcode_path_unpack () {
294 # Fixup hardcoded paths
295 #
296 # Note: The logic below must match the reverse logic in
297 # sstate_hardcode_path(d)
298 import subprocess
299
300 sstateinst = d.getVar('SSTATE_INSTDIR', True)
301 fixmefn = sstateinst + "fixmepath"
302 if os.path.isfile(fixmefn):
303 staging = d.getVar('STAGING_DIR', True)
304 staging_target = d.getVar('STAGING_DIR_TARGET', True)
305 staging_host = d.getVar('STAGING_DIR_HOST', True)
306
307 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
308 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
309 elif bb.data.inherits_class('cross', d):
310 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
311 else:
312 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
313
314 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
315 for fixmevar in extra_staging_fixmes.split():
316 fixme_path = d.getVar(fixmevar, True)
317 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
318
319 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
320 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
321
322 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
323 subprocess.call(sstate_hardcode_cmd, shell=True)
324
325 # Need to remove this or we'd copy it into the target directory and may
326 # conflict with another writer
327 os.remove(fixmefn)
328}
329
330def sstate_clean_cachefile(ss, d):
331 import oe.path
332
333 sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
334 bb.note("Removing %s" % sstatepkgfile)
335 oe.path.remove(sstatepkgfile)
336
337def sstate_clean_cachefiles(d):
338 for task in (d.getVar('SSTATETASKS', True) or "").split():
339 ld = d.createCopy()
340 ss = sstate_state_fromvars(ld, task)
341 sstate_clean_cachefile(ss, ld)
342
343def sstate_clean_manifest(manifest, d):
344 import oe.path
345
346 mfile = open(manifest)
347 entries = mfile.readlines()
348 mfile.close()
349
350 for entry in entries:
351 entry = entry.strip()
352 bb.debug(2, "Removing manifest: %s" % entry)
353 # We can race against another package populating directories as we're removing them
354 # so we ignore errors here.
355 try:
356 if entry.endswith("/"):
357 if os.path.islink(entry[:-1]):
358 os.remove(entry[:-1])
359 elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
360 os.rmdir(entry[:-1])
361 else:
362 oe.path.remove(entry)
363 except OSError:
364 pass
365
366 oe.path.remove(manifest)
367
368def sstate_clean(ss, d):
369 import oe.path
370 import glob
371
372 d2 = d.createCopy()
373 stamp_clean = d.getVar("STAMPCLEAN", True)
374 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
375 if extrainf:
376 d2.setVar("SSTATE_MANMACH", extrainf)
377 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
378 else:
379 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
380
381 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
382
383 if os.path.exists(manifest):
384 locks = []
385 for lock in ss['lockfiles-shared']:
386 locks.append(bb.utils.lockfile(lock))
387 for lock in ss['lockfiles']:
388 locks.append(bb.utils.lockfile(lock))
389
390 sstate_clean_manifest(manifest, d)
391
392 for lock in locks:
393 bb.utils.unlockfile(lock)
394
395 # Remove the current and previous stamps, but keep the sigdata.
396 #
397 # The glob() matches do_task* which may match multiple tasks, for
398 # example: do_package and do_package_write_ipk, so we need to
399 # exactly match *.do_task.* and *.do_task_setscene.*
400 rm_stamp = '.do_%s.' % ss['task']
401 rm_setscene = '.do_%s_setscene.' % ss['task']
402 # For BB_SIGNATURE_HANDLER = "noop"
403 rm_nohash = ".do_%s" % ss['task']
404 for stfile in glob.glob(wildcard_stfile):
405 # Keep the sigdata
406 if ".sigdata." in stfile:
407 continue
408 # Preserve taint files in the stamps directory
409 if stfile.endswith('.taint'):
410 continue
411 if rm_stamp in stfile or rm_setscene in stfile or \
412 stfile.endswith(rm_nohash):
413 oe.path.remove(stfile)
414
415sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
416
417CLEANFUNCS += "sstate_cleanall"
418
419python sstate_cleanall() {
420 bb.note("Removing shared state for package %s" % d.getVar('PN', True))
421
422 manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
423 if not os.path.exists(manifest_dir):
424 return
425
426 tasks = d.getVar('SSTATETASKS', True).split()
427 for name in tasks:
428 ld = d.createCopy()
429 shared_state = sstate_state_fromvars(ld, name)
430 sstate_clean(shared_state, ld)
431}
432
433python sstate_hardcode_path () {
434 import subprocess, platform
435
436 # Need to remove hardcoded paths and fix these when we install the
437 # staging packages.
438 #
439 # Note: the logic in this function needs to match the reverse logic
440 # in sstate_installpkg(ss, d)
441
442 staging = d.getVar('STAGING_DIR', True)
443 staging_target = d.getVar('STAGING_DIR_TARGET', True)
444 staging_host = d.getVar('STAGING_DIR_HOST', True)
445 sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
446
447 if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
448 sstate_grep_cmd = "grep -l -e '%s'" % (staging)
449 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
450 elif bb.data.inherits_class('cross', d):
451 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
452 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
453 else:
454 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
455 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
456
457 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
458 for fixmevar in extra_staging_fixmes.split():
459 fixme_path = d.getVar(fixmevar, True)
460 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
461
462 fixmefn = sstate_builddir + "fixmepath"
463
464 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
465 sstate_filelist_cmd = "tee %s" % (fixmefn)
466
467 # fixmepath file needs relative paths, drop sstate_builddir prefix
468 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
469
470 xargs_no_empty_run_cmd = '--no-run-if-empty'
471 if platform.system() == 'Darwin':
472 xargs_no_empty_run_cmd = ''
473
474 # Limit the fixpaths and sed operations based on the initial grep search
475 # This has the side effect of making sure the vfs cache is hot
476 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
477
478 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
479 subprocess.call(sstate_hardcode_cmd, shell=True)
480
481 # If the fixmefn is empty, remove it.
482 if os.stat(fixmefn).st_size == 0:
483 os.remove(fixmefn)
484 else:
485 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
486 subprocess.call(sstate_filelist_relative_cmd, shell=True)
487}
488
489def sstate_package(ss, d):
490 import oe.path
491
492 def make_relative_symlink(path, outputpath, d):
493 # Replace absolute TMPDIR paths in symlinks with relative ones
494 if not os.path.islink(path):
495 return
496 link = os.readlink(path)
497 if not os.path.isabs(link):
498 return
499 if not link.startswith(tmpdir):
500 return
501
502 depth = outputpath.rpartition(tmpdir)[2].count('/')
503 base = link.partition(tmpdir)[2].strip()
504 while depth > 1:
505 base = "/.." + base
506 depth -= 1
507 base = "." + base
508
509 bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
510 os.remove(path)
511 os.symlink(base, path)
512
513 tmpdir = d.getVar('TMPDIR', True)
514
515 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
516 sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['task'] + ".tgz"
517 bb.utils.remove(sstatebuild, recurse=True)
518 bb.utils.mkdirhier(sstatebuild)
519 bb.utils.mkdirhier(os.path.dirname(sstatepkg))
520 for state in ss['dirs']:
521 if not os.path.exists(state[1]):
522 continue
523 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
524 for walkroot, dirs, files in os.walk(state[1]):
525 for file in files:
526 srcpath = os.path.join(walkroot, file)
527 dstpath = srcpath.replace(state[1], state[2])
528 make_relative_symlink(srcpath, dstpath, d)
529 for dir in dirs:
530 srcpath = os.path.join(walkroot, dir)
531 dstpath = srcpath.replace(state[1], state[2])
532 make_relative_symlink(srcpath, dstpath, d)
533 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
534 oe.path.copyhardlinktree(state[1], sstatebuild + state[0])
535
536 workdir = d.getVar('WORKDIR', True)
537 for plain in ss['plaindirs']:
538 pdir = plain.replace(workdir, sstatebuild)
539 bb.utils.mkdirhier(plain)
540 bb.utils.mkdirhier(pdir)
541 oe.path.copyhardlinktree(plain, pdir)
542
543 d.setVar('SSTATE_BUILDDIR', sstatebuild)
544 d.setVar('SSTATE_PKG', sstatepkg)
545
546 for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \
547 (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
548 bb.build.exec_func(f, d)
549
550 bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
551
552 return
553
554def pstaging_fetch(sstatefetch, sstatepkg, d):
555 import bb.fetch2
556
557 # Only try and fetch if the user has configured a mirror
558 mirrors = d.getVar('SSTATE_MIRRORS', True)
559 if not mirrors:
560 return
561
562 # Copy the data object and override DL_DIR and SRC_URI
563 localdata = bb.data.createCopy(d)
564 bb.data.update_data(localdata)
565
566 dldir = localdata.expand("${SSTATE_DIR}")
567 bb.utils.mkdirhier(dldir)
568
569 localdata.delVar('MIRRORS')
570 localdata.delVar('FILESPATH')
571 localdata.setVar('DL_DIR', dldir)
572 localdata.setVar('PREMIRRORS', mirrors)
573
574 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
575 # we'll want to allow network access for the current set of fetches.
576 if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
577 localdata.delVar('BB_NO_NETWORK')
578
579 # Try a fetch from the sstate mirror, if it fails just return and
580 # we will build the package
581 for srcuri in ['file://{0}'.format(sstatefetch),
582 'file://{0}.siginfo'.format(sstatefetch)]:
583 localdata.setVar('SRC_URI', srcuri)
584 try:
585 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
586 fetcher.download()
587
588 # Need to optimise this, if using file:// urls, the fetcher just changes the local path
589 # For now work around by symlinking
590 localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
591 if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
592 os.symlink(localpath, sstatepkg)
593
594 except bb.fetch2.BBFetchException:
595 break
596
597def sstate_setscene(d):
598 shared_state = sstate_state_fromvars(d)
599 accelerate = sstate_installpkg(shared_state, d)
600 if not accelerate:
601 raise bb.build.FuncFailed("No suitable staging package found")
602
603python sstate_task_prefunc () {
604 shared_state = sstate_state_fromvars(d)
605 sstate_clean(shared_state, d)
606}
607
608python sstate_task_postfunc () {
609 shared_state = sstate_state_fromvars(d)
610 sstate_install(shared_state, d)
611 for intercept in shared_state['interceptfuncs']:
612 bb.build.exec_func(intercept, d)
613 omask = os.umask(002)
614 if omask != 002:
615 bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
616 sstate_package(shared_state, d)
617 os.umask(omask)
618}
619
620
621#
622# Shell function to generate a sstate package from a directory
623# set as SSTATE_BUILDDIR
624#
625sstate_create_package () {
626 cd ${SSTATE_BUILDDIR}
627 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
628 # Need to handle empty directories
629 if [ "$(ls -A)" ]; then
630 set +e
631 tar -czf $TFILE *
632 ret=$?
633 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
634 exit 1
635 fi
636 set -e
637 else
638 tar -cz --file=$TFILE --files-from=/dev/null
639 fi
640 chmod 0664 $TFILE
641 mv -f $TFILE ${SSTATE_PKG}
642
643 cd ${WORKDIR}
644 rm -rf ${SSTATE_BUILDDIR}
645}
646
647#
648# Shell function to decompress and prepare a package for installation
649#
650sstate_unpack_package () {
651 mkdir -p ${SSTATE_INSTDIR}
652 cd ${SSTATE_INSTDIR}
653 tar -xmvzf ${SSTATE_PKG}
654 # Use "! -w ||" to return true for read only files
655 [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
656}
657
658BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
659
660def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d):
661
662 ret = []
663 missed = []
664
665 def getpathcomponents(task, d):
666 # Magic data from BB_HASHFILENAME
667 splithashfn = sq_hashfn[task].split(" ")
668 spec = splithashfn[1]
669 extrapath = splithashfn[0]
670
671 tname = sq_task[task][3:]
672
673 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
674 spec = splithashfn[2]
675 extrapath = ""
676
677 return spec, extrapath, tname
678
679
680 for task in range(len(sq_fn)):
681
682 spec, extrapath, tname = getpathcomponents(task, d)
683
684 sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz.siginfo")
685
686 if os.path.exists(sstatefile):
687 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
688 ret.append(task)
689 continue
690 else:
691 missed.append(task)
692 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
693
694 mirrors = d.getVar("SSTATE_MIRRORS", True)
695 if mirrors:
696 # Copy the data object and override DL_DIR and SRC_URI
697 localdata = bb.data.createCopy(d)
698 bb.data.update_data(localdata)
699
700 dldir = localdata.expand("${SSTATE_DIR}")
701 localdata.setVar('DL_DIR', dldir)
702 localdata.setVar('PREMIRRORS', mirrors)
703
704 bb.debug(2, "SState using premirror of: %s" % mirrors)
705
706 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
707 # we'll want to allow network access for the current set of fetches.
708 if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
709 localdata.delVar('BB_NO_NETWORK')
710
711 for task in range(len(sq_fn)):
712 if task in ret:
713 continue
714
715 spec, extrapath, tname = getpathcomponents(task, d)
716
717 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz.siginfo")
718
719 srcuri = "file://" + sstatefile
720 localdata.setVar('SRC_URI', srcuri)
721 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
722
723 try:
724 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata)
725 fetcher.checkstatus()
726 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
727 ret.append(task)
728 if task in missed:
729 missed.remove(task)
730 except:
731 missed.append(task)
732 bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
733 pass
734
735 inheritlist = d.getVar("INHERIT", True)
736 if "toaster" in inheritlist:
737 evdata = {'missed': [], 'found': []}
738 for task in missed:
739 spec, extrapath, tname = getpathcomponents(task, d)
740 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
741 evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
742 for task in ret:
743 spec, extrapath, tname = getpathcomponents(task, d)
744 sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
745 evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
746 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
747
748 if hasattr(bb.parse.siggen, "checkhashes"):
749 bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
750
751 return ret
752
753BB_SETSCENE_DEPVALID = "setscene_depvalid"
754
755def setscene_depvalid(task, taskdependees, notneeded, d):
756 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
757 # task is included in taskdependees too
758
759 bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))
760
761 def isNativeCross(x):
762 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x
763
764 def isPostInstDep(x):
765 if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-update-icon-cache-native"]:
766 return True
767 return False
768
769 # We only need to trigger populate_lic through direct dependencies
770 if taskdependees[task][1] == "do_populate_lic":
771 return True
772
773 for dep in taskdependees:
774 bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
775 if task == dep:
776 continue
777 if dep in notneeded:
778 continue
779 # do_package_write_* and do_package don't need do_package
780 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
781 continue
782 # do_package_write_* and do_package don't need do_populate_sysroot, unless this is a postinstall dependency
783 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
784 if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
785 return False
786 continue
787 # Native/Cross packages don't exist and are noexec anyway
788 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
789 continue
790
791 # Consider sysroot depending on sysroot tasks
792 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
793 # base-passwd/shadow-sysroot don't need their dependencies
794 if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
795 continue
796 # Nothing need depend on libc-initial/gcc-cross-initial
797 if "-initial" in taskdependees[task][0]:
798 continue
799 # Native/Cross populate_sysroot need their dependencies
800 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
801 return False
802 # Target populate_sysroot depended on by cross tools needs to be installed
803 if isNativeCross(taskdependees[dep][0]):
804 return False
805 # Native/cross tools depended upon by target sysroot are not needed
806 if isNativeCross(taskdependees[task][0]):
807 continue
808 # Target populate_sysroot tasks need their dependencies
809 return False
810
811 # This is due to the [depends] in useradd.bbclass complicating matters
812 # The logic *is* reversed here due to the way hard setscene dependencies are injected
813 if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
814 continue
815
816 # Safe fallthrough default
817 bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
818 return False
819 return True
820
821addhandler sstate_eventhandler
822sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
823python sstate_eventhandler() {
824 d = e.data
825 # When we write an sstate package we rewrite the SSTATE_PKG
826 spkg = d.getVar('SSTATE_PKG', True)
827 if not spkg.endswith(".tgz"):
828 taskname = d.getVar("BB_RUNTASK", True)[3:]
829 spec = d.getVar('SSTATE_PKGSPEC', True)
830 swspec = d.getVar('SSTATE_SWSPEC', True)
831 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
832 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
833 d.setVar("SSTATE_EXTRAPATH", "")
834 sstatepkg = d.getVar('SSTATE_PKG', True)
835 bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
836}
837
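(For illustration: a hypothetical local.conf sketch using the mirror support implemented by pstaging_fetch() and sstate_checkhashes() above. The server URL is made up; PATH is the literal placeholder the fetcher substitutes with the package path.)

    SSTATE_MIRRORS ?= "file://.* http://sstate.example.com/share/PATH"
    # optionally allow sstate fetches even when BB_NO_NETWORK = "1"
    SSTATE_MIRROR_ALLOW_NETWORK = "1"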
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
new file mode 100644
index 0000000000..57b2743196
--- /dev/null
+++ b/meta/classes/staging.bbclass
@@ -0,0 +1,122 @@
1
2sysroot_stage_dir() {
3 src="$1"
4 dest="$2"
5 # if the src doesn't exist don't do anything
6 if [ ! -d "$src" ]; then
7 return
8 fi
9
10 mkdir -p "$dest"
11 (
12 cd $src
13 find . -print0 | cpio --null -pdlu $dest
14 )
15}
16
17sysroot_stage_libdir() {
18 src="$1"
19 dest="$2"
20
21 sysroot_stage_dir $src $dest
22}
23
24sysroot_stage_dirs() {
25 from="$1"
26 to="$2"
27
28 sysroot_stage_dir $from${includedir} $to${includedir}
29 if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
30 sysroot_stage_dir $from${bindir} $to${bindir}
31 sysroot_stage_dir $from${sbindir} $to${sbindir}
32 sysroot_stage_dir $from${base_bindir} $to${base_bindir}
33 sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
34 sysroot_stage_dir $from${libexecdir} $to${libexecdir}
35 sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
36 sysroot_stage_dir $from${localstatedir} $to${localstatedir}
37 fi
38 if [ -d $from${libdir} ]
39 then
40 sysroot_stage_libdir $from${libdir} $to${libdir}
41 fi
42 if [ -d $from${base_libdir} ]
43 then
44 sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
45 fi
46 if [ -d $from${nonarch_base_libdir} ]
47 then
48 sysroot_stage_libdir $from${nonarch_base_libdir} $to${nonarch_base_libdir}
49 fi
50 sysroot_stage_dir $from${datadir} $to${datadir}
51 # We don't care about docs/info/manpages/locales
52	rm -rf $to${mandir}/ $to${docdir}/ $to${infodir}/ $to${datadir}/locale/
53 rm -rf $to${datadir}/applications/ $to${datadir}/fonts/ $to${datadir}/pixmaps/
54}
55
56sysroot_stage_all() {
57 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
58}
59
60do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
61do_populate_sysroot[umask] = "022"
62
63addtask populate_sysroot after do_install
64
65SYSROOT_PREPROCESS_FUNCS ?= ""
66SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/"
67SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
68
69# We clean out any existing sstate from the sysroot if we rerun configure
70python sysroot_cleansstate () {
71 ss = sstate_state_fromvars(d, "populate_sysroot")
72 sstate_clean(ss, d)
73}
74do_configure[prefuncs] += "sysroot_cleansstate"
75
76
77BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
78
79def sysroot_checkhashes(covered, tasknames, fnids, fns, d, invalidtasks = None):
80 problems = set()
81 configurefnids = set()
82 if not invalidtasks:
83 invalidtasks = xrange(len(tasknames))
84 for task in invalidtasks:
85 if tasknames[task] == "do_configure" and task not in covered:
86 configurefnids.add(fnids[task])
87 for task in covered:
88 if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
89 problems.add(task)
90 return problems
91
92python do_populate_sysroot () {
93 bb.build.exec_func("sysroot_stage_all", d)
94 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
95 bb.build.exec_func(f, d)
96 pn = d.getVar("PN", True)
97 multiprov = d.getVar("MULTI_PROVIDER_WHITELIST", True).split()
98 provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
99 bb.utils.mkdirhier(provdir)
100 for p in d.getVar("PROVIDES", True).split():
101 if p in multiprov:
102 continue
103 p = p.replace("/", "_")
104 with open(provdir + p, "w") as f:
105 f.write(pn)
106}
107
108do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
109do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
110
111SSTATETASKS += "do_populate_sysroot"
112do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
113do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
114do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
115do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
116
117python do_populate_sysroot_setscene () {
118 sstate_setscene(d)
119}
120addtask do_populate_sysroot_setscene
121
122
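(For illustration: a hypothetical recipe fragment hooking into the staging flow. SYSROOT_PREPROCESS_FUNCS entries run inside do_populate_sysroot after sysroot_stage_all and before the sstate package is created; the function and file names are made up.)

    SYSROOT_PREPROCESS_FUNCS += "foo_sysroot_cleanup"
    foo_sysroot_cleanup() {
        # drop a build-only helper from the staged sysroot
        rm -f ${SYSROOT_DESTDIR}${bindir}/foo-internal-tool
    }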
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
new file mode 100644
index 0000000000..d6498d98bb
--- /dev/null
+++ b/meta/classes/syslinux.bbclass
@@ -0,0 +1,187 @@
1# syslinux.bbclass
2# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
3# Released under the MIT license (see packages/COPYING)
4
5# Provide syslinux specific functions for building bootable images.
6
7# External variables
8# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
9# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
10# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
11# ${LABELS} - a list of targets for the automatic config
12# ${APPEND} - an override list of append strings for each label
13# ${SYSLINUX_OPTS} - additional options to add to the syslinux file, ';' delimited
14# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu
15# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
16# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
17# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
18# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
19
20do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
21 syslinux-native:do_populate_sysroot"
22
23SYSLINUXCFG = "${S}/syslinux.cfg"
24
25ISOLINUXDIR = "/isolinux"
26SYSLINUXDIR = "/"
27# The kernel has an internal default console, which you can override with
28# a console=...some_tty...
29SYSLINUX_DEFAULT_CONSOLE ?= ""
30SYSLINUX_SERIAL ?= "0 115200"
31SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
32ISO_BOOTIMG = "isolinux/isolinux.bin"
33ISO_BOOTCAT = "isolinux/boot.cat"
34MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
35APPEND_prepend = " ${SYSLINUX_ROOT} "
36
37syslinux_populate() {
38 DEST=$1
39 BOOTDIR=$2
40 CFGNAME=$3
41
42 install -d ${DEST}${BOOTDIR}
43
44 # Install the config files
45 install -m 0644 ${SYSLINUXCFG} ${DEST}${BOOTDIR}/${CFGNAME}
46 if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
47 install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
48 install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
49 install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
50 if [ "${SYSLINUX_SPLASH}" != "" ] ; then
51 install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
52 fi
53 fi
54}
55
56syslinux_iso_populate() {
57 iso_dir=$1
58 syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
59 install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
60 install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
61}
62
63syslinux_hddimg_populate() {
64 hdd_dir=$1
65 syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
66 install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
67}
68
69syslinux_hddimg_install() {
70 syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
71}
72
73syslinux_hdddirect_install() {
74 DEST=$1
75 syslinux $DEST
76}
77
78python build_syslinux_cfg () {
79 import copy
80 import sys
81
82 workdir = d.getVar('WORKDIR', True)
83 if not workdir:
84 bb.error("WORKDIR not defined, unable to package")
85 return
86
87 labels = d.getVar('LABELS', True)
88 if not labels:
89 bb.debug(1, "LABELS not defined, nothing to do")
90 return
91
92    if labels.strip() == "":
93 bb.debug(1, "No labels, nothing to do")
94 return
95
96 cfile = d.getVar('SYSLINUXCFG', True)
97 if not cfile:
98 raise bb.build.FuncFailed('Unable to read SYSLINUXCFG')
99
100 try:
101 cfgfile = file(cfile, 'w')
102    except IOError:
103        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
104
105 cfgfile.write('# Automatically created by OE\n')
106
107 opts = d.getVar('SYSLINUX_OPTS', True)
108
109 if opts:
110 for opt in opts.split(';'):
111 cfgfile.write('%s\n' % opt)
112
113    cfgfile.write('ALLOWOPTIONS 1\n')
114 syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE', True)
115 syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY', True)
116 syslinux_serial = d.getVar('SYSLINUX_SERIAL', True)
117 if syslinux_serial:
118 cfgfile.write('SERIAL %s\n' % syslinux_serial)
119
120 menu = d.getVar('AUTO_SYSLINUXMENU', True)
121
122 if menu and syslinux_serial:
123 cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
124 else:
125 cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
126
127 timeout = d.getVar('SYSLINUX_TIMEOUT', True)
128
129 if timeout:
130 cfgfile.write('TIMEOUT %s\n' % timeout)
131 else:
132 cfgfile.write('TIMEOUT 50\n')
133
134 prompt = d.getVar('SYSLINUX_PROMPT', True)
135 if prompt:
136 cfgfile.write('PROMPT %s\n' % prompt)
137 else:
138 cfgfile.write('PROMPT 1\n')
139
140 if menu:
141 cfgfile.write('ui vesamenu.c32\n')
142 cfgfile.write('menu title Select kernel options and boot kernel\n')
143 cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
144 splash = d.getVar('SYSLINUX_SPLASH', True)
145 if splash:
146 cfgfile.write('menu background splash.lss\n')
147
148 for label in labels.split():
149 localdata = bb.data.createCopy(d)
150
151 overrides = localdata.getVar('OVERRIDES', True)
152 if not overrides:
153 raise bb.build.FuncFailed('OVERRIDES not defined')
154
155 localdata.setVar('OVERRIDES', label + ':' + overrides)
156 bb.data.update_data(localdata)
157
158 btypes = [ [ "", syslinux_default_console ] ]
159 if menu and syslinux_serial:
160 btypes = [ [ "Graphics console ", syslinux_default_console ],
161 [ "Serial console ", syslinux_serial_tty ] ]
162
163 for btype in btypes:
164 cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
165
166 exargs = d.getVar('SYSLINUX_KERNEL_ARGS', True)
167 if exargs:
168 btype[1] += " " + exargs
169
170 append = localdata.getVar('APPEND', True)
171 initrd = localdata.getVar('INITRD', True)
172
173 if append:
174 cfgfile.write('APPEND ')
175
176 if initrd:
177 cfgfile.write('initrd=/initrd ')
178
179                cfgfile.write('LABEL=%s ' % (label))
180
181 cfgfile.write('%s %s\n' % (append, btype[1]))
182 else:
183 cfgfile.write('APPEND %s\n' % btype[1])
184
185 cfgfile.close()
186}
187build_syslinux_cfg[vardeps] += "APPEND"
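
For reference, a machine configuration or image recipe drives this class through
the variables documented above. A minimal sketch (the label, root device and
timeout values are illustrative only, not from this commit):

    LABELS = "boot"
    SYSLINUX_ROOT = "root=/dev/sda2"
    APPEND = "rootwait quiet"
    AUTO_SYSLINUXMENU = "1"
    SYSLINUX_TIMEOUT = "50"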
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
new file mode 100644
index 0000000000..c34884bd38
--- /dev/null
+++ b/meta/classes/systemd.bbclass
@@ -0,0 +1,197 @@
1# The list of packages that should have systemd packaging scripts added. For
2# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
3# files in this package. If this variable isn't set, [package].service is used.
4SYSTEMD_PACKAGES ?= "${PN}"
5SYSTEMD_PACKAGES_class-native ?= ""
6SYSTEMD_PACKAGES_class-nativesdk ?= ""
7
8# Whether to enable or disable the services on installation.
9SYSTEMD_AUTO_ENABLE ??= "enable"
10
11# This class will be included in any recipe that supports systemd init scripts,
12# even if systemd is not in DISTRO_FEATURES. As such, don't make any changes
13# directly, but check DISTRO_FEATURES first.
14python __anonymous() {
15 # If the distro features have systemd but not sysvinit, inhibit update-rcd
16 # from doing any work so that pure-systemd images don't have redundant init
17 # files.
18 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
19 d.appendVar("DEPENDS", " systemd-systemctl-native")
20 if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
21 d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
22}
23
24systemd_postinst() {
25OPTS=""
26
27if [ -n "$D" ]; then
28 OPTS="--root=$D"
29fi
30
31if type systemctl >/dev/null 2>/dev/null; then
32 systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
33
34 if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
35 systemctl restart ${SYSTEMD_SERVICE}
36 fi
37fi
38}
39
40systemd_prerm() {
41OPTS=""
42
43if [ -n "$D" ]; then
44 OPTS="--root=$D"
45fi
46
47if type systemctl >/dev/null 2>/dev/null; then
48 if [ -z "$D" ]; then
49 systemctl stop ${SYSTEMD_SERVICE}
50 fi
51
52 systemctl $OPTS disable ${SYSTEMD_SERVICE}
53fi
54}
55
56
57systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
58systemd_populate_packages[vardepsexclude] += "OVERRIDES"
59
60
61python systemd_populate_packages() {
62 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
63 return
64
65 def get_package_var(d, var, pkg):
66 val = (d.getVar('%s_%s' % (var, pkg), True) or "").strip()
67 if val == "":
68 val = (d.getVar(var, True) or "").strip()
69 return val
70
71 # Check if systemd-packages already included in PACKAGES
72 def systemd_check_package(pkg_systemd):
73 packages = d.getVar('PACKAGES', True)
74 if not pkg_systemd in packages.split():
75 bb.error('%s does not appear in package list, please add it' % pkg_systemd)
76
77
78 def systemd_generate_package_scripts(pkg):
79 bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
80
81 # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
82 # variable.
83 localdata = d.createCopy()
84 localdata.prependVar("OVERRIDES", pkg + ":")
85 bb.data.update_data(localdata)
86
87 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
88 if not postinst:
89 postinst = '#!/bin/sh\n'
90 postinst += localdata.getVar('systemd_postinst', True)
91 d.setVar('pkg_postinst_%s' % pkg, postinst)
92
93 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
94 if not prerm:
95 prerm = '#!/bin/sh\n'
96 prerm += localdata.getVar('systemd_prerm', True)
97 d.setVar('pkg_prerm_%s' % pkg, prerm)
98
99
100 # Add files to FILES_*-systemd if existent and not already done
101 def systemd_append_file(pkg_systemd, file_append):
102 appended = False
103 if os.path.exists(oe.path.join(d.getVar("D", True), file_append)):
104 var_name = "FILES_" + pkg_systemd
105 files = d.getVar(var_name, False) or ""
106 if file_append not in files.split():
107 d.appendVar(var_name, " " + file_append)
108 appended = True
109 return appended
110
111    # Add systemd files to FILES_*-systemd, parse for Also= and follow them recursively
112 def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
113 # avoid infinite recursion
114 if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
115 fullpath = oe.path.join(d.getVar("D", True), path, service)
116 if service.find('.service') != -1:
117 # for *.service add *@.service
118 service_base = service.replace('.service', '')
119 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
120 if service.find('.socket') != -1:
121 # for *.socket add *.service and *@.service
122 service_base = service.replace('.socket', '')
123 systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
124 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
125 for key in keys.split():
126 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
127 cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key)
128 pipe = os.popen(cmd, 'r')
129 line = pipe.readline()
130 while line:
131 line = line.replace('\n', '')
132 systemd_add_files_and_parse(pkg_systemd, path, line, keys)
133 line = pipe.readline()
134 pipe.close()
135
136 # Check service-files and call systemd_add_files_and_parse for each entry
137 def systemd_check_services():
138 searchpaths = [oe.path.join(d.getVar("sysconfdir", True), "systemd", "system"),]
139 searchpaths.append(oe.path.join(d.getVar("nonarch_base_libdir", True), "systemd", "system"))
140 searchpaths.append(oe.path.join(d.getVar("exec_prefix", True), d.getVar("nonarch_base_libdir", True), "systemd", "system"))
141 systemd_packages = d.getVar('SYSTEMD_PACKAGES', True)
142 has_exactly_one_service = len(systemd_packages.split()) == 1
143 if has_exactly_one_service:
144 has_exactly_one_service = len(get_package_var(d, 'SYSTEMD_SERVICE', systemd_packages).split()) == 1
145
146 keys = 'Also'
147 # scan for all in SYSTEMD_SERVICE[]
148 for pkg_systemd in systemd_packages.split():
149 for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
150 path_found = ''
151 for path in searchpaths:
152 if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
153 path_found = path
154 break
155 if path_found != '':
156 systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
157 else:
158 raise bb.build.FuncFailed("SYSTEMD_SERVICE_%s value %s does not exist" % \
159 (pkg_systemd, service))
160
161 # Run all modifications once when creating package
162 if os.path.exists(d.getVar("D", True)):
163 for pkg in d.getVar('SYSTEMD_PACKAGES', True).split():
164 systemd_check_package(pkg)
165 if d.getVar('SYSTEMD_SERVICE_' + pkg, True):
166 systemd_generate_package_scripts(pkg)
167 systemd_check_services()
168}
169
170PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
171
172python rm_systemd_unitdir (){
173 import shutil
174 if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
175 systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True))
176 if os.path.exists(systemd_unitdir):
177 shutil.rmtree(systemd_unitdir)
178 systemd_libdir = os.path.dirname(systemd_unitdir)
179 if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
180 os.rmdir(systemd_libdir)
181}
182do_install[postfuncs] += "rm_systemd_unitdir "
183
184python rm_sysvinit_initddir (){
185 import shutil
186 sysv_initddir = oe.path.join(d.getVar("D", True), (d.getVar('INIT_D_DIR', True) or "/etc/init.d"))
187
188 if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
189 not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
190 os.path.exists(sysv_initddir):
191 systemd_unitdir = oe.path.join(d.getVar("D", True), d.getVar('systemd_unitdir', True), "system")
192
193 # If systemd_unitdir contains anything, delete sysv_initddir
194 if (os.path.exists(systemd_unitdir) and os.listdir(systemd_unitdir)):
195 shutil.rmtree(sysv_initddir)
196}
197do_install[postfuncs] += "rm_sysvinit_initddir "
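
For reference, a daemon recipe would typically use this class as sketched below;
the unit name is illustrative only:

    inherit systemd

    SYSTEMD_PACKAGES = "${PN}"
    SYSTEMD_SERVICE_${PN} = "mydaemon.service"
    # Install the unit but do not enable it at first boot:
    SYSTEMD_AUTO_ENABLE = "disable"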
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
new file mode 100644
index 0000000000..e577c6d594
--- /dev/null
+++ b/meta/classes/terminal.bbclass
@@ -0,0 +1,94 @@
1OE_TERMINAL ?= 'auto'
2OE_TERMINAL[type] = 'choice'
3OE_TERMINAL[choices] = 'auto none \
4 ${@" ".join(o.name \
5 for o in oe.terminal.prioritized())}'
6
7OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
8OE_TERMINAL_EXPORTS[type] = 'list'
9
10XAUTHORITY ?= "${HOME}/.Xauthority"
11SHELL ?= "bash"
12
13
14def emit_terminal_func(command, envdata, d):
15 cmd_func = 'do_terminal'
16
17 envdata.setVar(cmd_func, 'exec ' + command)
18 envdata.setVarFlag(cmd_func, 'func', 1)
19
20 runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
21 runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
22 runfile = os.path.join(d.getVar('T', True), runfile)
23 bb.utils.mkdirhier(os.path.dirname(runfile))
24
25 with open(runfile, 'w') as script:
26 script.write('#!/bin/sh -e\n')
27 bb.data.emit_func(cmd_func, script, envdata)
28 script.write(cmd_func)
29 script.write("\n")
30 os.chmod(runfile, 0755)
31
32 return runfile
33
34def oe_terminal(command, title, d):
35 import oe.data
36 import oe.terminal
37
38 envdata = bb.data.init()
39
40 for v in os.environ:
41 envdata.setVar(v, os.environ[v])
42 envdata.setVarFlag(v, 'export', 1)
43
44 for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
45 value = d.getVar(export, True)
46 if value is not None:
47 os.environ[export] = str(value)
48 envdata.setVar(export, str(value))
49 envdata.setVarFlag(export, 'export', 1)
50 if export == "PSEUDO_DISABLED":
51 if "PSEUDO_UNLOAD" in os.environ:
52 del os.environ["PSEUDO_UNLOAD"]
53 envdata.delVar("PSEUDO_UNLOAD")
54
55 # Add in all variables from the user's original environment which
56    # haven't subsequently been set/changed
57 origbbenv = d.getVar("BB_ORIGENV", False) or {}
58 for key in origbbenv:
59 if key in envdata:
60 continue
61 value = origbbenv.getVar(key, True)
62 if value is not None:
63 os.environ[key] = str(value)
64 envdata.setVar(key, str(value))
65 envdata.setVarFlag(key, 'export', 1)
66
67    # A complex PS1 might need more escaping of chars,
68    # so let's not export PS1 at all.
69 envdata.delVar("PS1")
70
71 # Replace command with an executable wrapper script
72 command = emit_terminal_func(command, envdata, d)
73
74 terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
75 if terminal == 'none':
76 bb.fatal('Devshell usage disabled with OE_TERMINAL')
77 elif terminal != 'auto':
78 try:
79 oe.terminal.spawn(terminal, command, title, None, d)
80 return
81 except oe.terminal.UnsupportedTerminal:
82 bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
83 terminal)
84 except oe.terminal.ExecutionError as exc:
85 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
86
87 try:
88 oe.terminal.spawn_preferred(command, title, None, d)
89 except oe.terminal.NoSupportedTerminals:
90 bb.fatal('No valid terminal found, unable to open devshell')
91 except oe.terminal.ExecutionError as exc:
92 bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
93
94oe_terminal[vardepsexclude] = "BB_ORIGENV"
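
For reference, the terminal used for devshell and similar tasks can be pinned in
local.conf; "screen" is illustrative and must be one of the names registered in
oe.terminal:

    OE_TERMINAL = "screen"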
diff --git a/meta/classes/testimage-auto.bbclass b/meta/classes/testimage-auto.bbclass
new file mode 100644
index 0000000000..860599d2b5
--- /dev/null
+++ b/meta/classes/testimage-auto.bbclass
@@ -0,0 +1,23 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# Run tests automatically on an image after the image is constructed
7# (as opposed to testimage.bbclass alone where tests must be called
8# manually using bitbake -c testimage <image>).
9#
10# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to
11# inherit it since that will be done in image.bbclass when this variable
12# has been set.
13#
14# See testimage.bbclass for the test implementation.
15
16inherit testimage
17
18python do_testimage_auto() {
19 testimage_main(d)
20}
21addtask testimage_auto before do_build after do_rootfs
22do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}"
23do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}"
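
As the header comment notes, this class is activated from local.conf rather than
inherited directly:

    TEST_IMAGE = "1"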
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
new file mode 100644
index 0000000000..683173854d
--- /dev/null
+++ b/meta/classes/testimage.bbclass
@@ -0,0 +1,323 @@
1# Copyright (C) 2013 Intel Corporation
2#
3# Released under the MIT license (see COPYING.MIT)
4
5
6# testimage.bbclass enables testing of qemu images using python unittests.
7# Most of the tests are commands run on the target image over ssh.
8# To use it add testimage to global inherit and call your target image with -c testimage
9# You can try it out like this:
10# - first build a qemu core-image-sato
11# - add INHERIT += "testimage" in local.conf
12# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
13
14# You can set (or append to) TEST_SUITES in local.conf to select the tests
15# which you want to run for your target.
16# The test names are the module names in meta/lib/oeqa/runtime.
17# Each name in TEST_SUITES represents a required test for the image. (no skipping allowed)
18# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
19# Note that the order in TEST_SUITES is important (it's the order in which the tests run) and it influences test dependencies.
20# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
21
22# TEST_LOG_DIR contains the ssh command log and may contain information about which command is running, its output and return codes, and, for qemu, a boot log up to login.
23# Booting is handled by this class, and it's not a test in itself.
24# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
25
26TEST_LOG_DIR ?= "${WORKDIR}/testimage"
27
28TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
29TEST_EXPORT_ONLY ?= "0"
30
31DEFAULT_TEST_SUITES = "ping auto"
32DEFAULT_TEST_SUITES_pn-core-image-minimal = "ping"
33DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date rpm smart dmesg python parselogs"
34DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc rpm smart kernelmodule dmesg python parselogs"
35DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
36TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
37
38TEST_QEMUBOOT_TIMEOUT ?= "1000"
39TEST_TARGET ?= "qemu"
40TEST_TARGET_IP ?= ""
41TEST_SERVER_IP ?= ""
42
43TESTIMAGEDEPENDS = ""
44TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot"
45
46TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
47TESTIMAGELOCK_qemuall = ""
48
49python do_testimage() {
50 testimage_main(d)
51}
52addtask testimage
53do_testimage[nostamp] = "1"
54do_testimage[depends] += "${TESTIMAGEDEPENDS}"
55do_testimage[lockfiles] += "${TESTIMAGELOCK}"
56
57python do_testsdk() {
58 testsdk_main(d)
59}
60addtask testsdk
61do_testsdk[nostamp] = "1"
62do_testsdk[depends] += "${TESTIMAGEDEPENDS}"
63do_testsdk[lockfiles] += "${TESTIMAGELOCK}"
64
65def get_tests_list(d, type="runtime"):
66 testsuites = d.getVar("TEST_SUITES", True).split()
67 bbpath = d.getVar("BBPATH", True).split(':')
68
69 # This relies on lib/ under each directory in BBPATH being added to sys.path
70 # (as done by default in base.bbclass)
71 testslist = []
72 for testname in testsuites:
73 if testname != "auto":
74 found = False
75 for p in bbpath:
76 if os.path.exists(os.path.join(p, 'lib', 'oeqa', type, testname + '.py')):
77 testslist.append("oeqa." + type + "." + testname)
78 found = True
79 break
80 if not found:
81 bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
82
83 if "auto" in testsuites:
84 def add_auto_list(path):
85 if not os.path.exists(os.path.join(path, '__init__.py')):
86 bb.fatal('Tests directory %s exists but is missing __init__.py' % path)
87 files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
88 for f in files:
89 module = 'oeqa.' + type + '.' + f[:-3]
90 if module not in testslist:
91 testslist.append(module)
92
93 for p in bbpath:
94 testpath = os.path.join(p, 'lib', 'oeqa', type)
95 bb.debug(2, 'Searching for tests in %s' % testpath)
96 if os.path.exists(testpath):
97 add_auto_list(testpath)
98
99 return testslist
100
101
102def exportTests(d,tc):
103 import json
104 import shutil
105 import pkgutil
106
107 exportpath = d.getVar("TEST_EXPORT_DIR", True)
108
109 savedata = {}
110 savedata["d"] = {}
111 savedata["target"] = {}
112 for key in tc.__dict__:
113 # special cases
114 if key != "d" and key != "target":
115 savedata[key] = getattr(tc, key)
116 savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
117 savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
118
119 keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
120 and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func")]
121 for key in keys:
122 try:
123 savedata["d"][key] = d.getVar(key, True)
124 except bb.data_smart.ExpansionError:
125 # we don't care about those anyway
126 pass
127
128 with open(os.path.join(exportpath, "testdata.json"), "w") as f:
129 json.dump(savedata, f, skipkeys=True, indent=4, sort_keys=True)
130
131 # now start copying files
132 # we'll basically copy everything under meta/lib/oeqa, with these exceptions
133 # - oeqa/targetcontrol.py - not needed
134 # - oeqa/selftest - something else
135 # That means:
136 # - all tests from oeqa/runtime defined in TEST_SUITES (including from other layers)
137 # - the contents of oeqa/utils and oeqa/runtime/files
138    # - oeqa/oetest.py and oeqa/runexported.py (this will get copied to exportpath not exportpath/oeqa)
139 # - __init__.py files
140 bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
141 bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
142 # copy test modules, this should cover tests in other layers too
143 for t in tc.testslist:
144 mod = pkgutil.get_loader(t)
145 shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
146 # copy __init__.py files
147 oeqadir = pkgutil.get_loader("oeqa").filename
148 shutil.copy2(os.path.join(oeqadir, "__init__.py"), os.path.join(exportpath, "oeqa"))
149 shutil.copy2(os.path.join(oeqadir, "runtime/__init__.py"), os.path.join(exportpath, "oeqa/runtime"))
150 # copy oeqa/oetest.py and oeqa/runexported.py
151 shutil.copy2(os.path.join(oeqadir, "oetest.py"), os.path.join(exportpath, "oeqa"))
152 shutil.copy2(os.path.join(oeqadir, "runexported.py"), exportpath)
153 # copy oeqa/utils/*.py
154 for root, dirs, files in os.walk(os.path.join(oeqadir, "utils")):
155 for f in files:
156 if f.endswith(".py"):
157 shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/utils"))
158 # copy oeqa/runtime/files/*
159 for root, dirs, files in os.walk(os.path.join(oeqadir, "runtime/files")):
160 for f in files:
161 shutil.copy2(os.path.join(root, f), os.path.join(exportpath, "oeqa/runtime/files"))
162
163 bb.plain("Exported tests to: %s" % exportpath)
164
165
166def testimage_main(d):
167 import unittest
168 import os
169 import oeqa.runtime
170 import time
171 from oeqa.oetest import loadTests, runTests
172 from oeqa.targetcontrol import get_target_controller
173
174 pn = d.getVar("PN", True)
175 export = oe.utils.conditional("TEST_EXPORT_ONLY", "1", True, False, d)
176 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
177 if export:
178 bb.utils.remove(d.getVar("TEST_EXPORT_DIR", True), recurse=True)
179 bb.utils.mkdirhier(d.getVar("TEST_EXPORT_DIR", True))
180
181 # tests in TEST_SUITES become required tests
182    # they won't be skipped even if they aren't suitable for an image (like xorg for minimal)
183 # testslist is what we'll actually pass to the unittest loader
184 testslist = get_tests_list(d)
185 testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"]
186
187 # the robot dance
188 target = get_target_controller(d)
189
190 class TestContext(object):
191 def __init__(self):
192 self.d = d
193 self.testslist = testslist
194 self.testsrequired = testsrequired
195 self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
196 self.target = target
197 self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
198 self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
199 manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
200 try:
201 with open(manifest) as f:
202 self.pkgmanifest = f.read()
203 except IOError as e:
204 bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
205
206 # test context
207 tc = TestContext()
208
209 # this is a dummy load of tests
210 # we are doing that to find compile errors in the tests themselves
211 # before booting the image
212 try:
213 loadTests(tc)
214 except Exception as e:
215 import traceback
216 bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
217
218 target.deploy()
219
220 target.start()
221 try:
222 if export:
223 exportTests(d,tc)
224 else:
225 starttime = time.time()
226 result = runTests(tc)
227 stoptime = time.time()
228 if result.wasSuccessful():
229 bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
230 msg = "%s - OK - All required tests passed" % pn
231 skipped = len(result.skipped)
232 if skipped:
233 msg += " (skipped=%d)" % skipped
234 bb.plain(msg)
235 else:
236 raise bb.build.FuncFailed("%s - FAILED - check the task log and the ssh log" % pn )
237 finally:
238 target.stop()
239
240testimage_main[vardepsexclude] =+ "BB_ORIGENV"
241
242
243def testsdk_main(d):
244 import unittest
245 import os
246 import glob
247 import oeqa.runtime
248 import oeqa.sdk
249 import time
250 import subprocess
251 from oeqa.oetest import loadTests, runTests
252
253 pn = d.getVar("PN", True)
254 bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
255
256 # tests in TEST_SUITES become required tests
257 # they won't be skipped even if they aren't suitable.
258 # testslist is what we'll actually pass to the unittest loader
259 testslist = get_tests_list(d, "sdk")
260 testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"]
261
262 sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
263 bb.utils.remove(sdktestdir, True)
264 bb.utils.mkdirhier(sdktestdir)
265
266 tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
267 if not os.path.exists(tcname):
268        bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake meta-toolchain'.")
269 subprocess.call("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
270
271 targets = glob.glob(d.expand(sdktestdir + "/tc/sysroots/*${TARGET_VENDOR}-linux*"))
272 if len(targets) > 1:
273        bb.fatal("Error: multiple targets found within the SDK and we don't know which to test: %s" % str(targets))
274 sdkenv = sdktestdir + "/tc/environment-setup-" + os.path.basename(targets[0])
275
276 class TestContext(object):
277 def __init__(self):
278 self.d = d
279 self.testslist = testslist
280 self.testsrequired = testsrequired
281 self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
282 self.sdktestdir = sdktestdir
283 self.sdkenv = sdkenv
284 self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
285 self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
286 manifest = os.path.join(d.getVar("SDK_MANIFEST", True))
287 try:
288 with open(manifest) as f:
289 self.pkgmanifest = f.read()
290 except IOError as e:
291 bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)
292
293 # test context
294 tc = TestContext()
295
296 # this is a dummy load of tests
297 # we are doing that to find compile errors in the tests themselves
298 # before booting the image
299 try:
300 loadTests(tc, "sdk")
301 except Exception as e:
302 import traceback
303 bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
304
305 try:
306 starttime = time.time()
307 result = runTests(tc, "sdk")
308 stoptime = time.time()
309 if result.wasSuccessful():
310 bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
311 msg = "%s - OK - All required tests passed" % pn
312 skipped = len(result.skipped)
313 if skipped:
314 msg += " (skipped=%d)" % skipped
315 bb.plain(msg)
316 else:
317 raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn )
318 finally:
319 pass
320 bb.utils.remove(sdktestdir, True)
321
322testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
323
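
For reference, a typical local.conf setup for this class, using test module names
from the defaults above (the timeout value is illustrative):

    INHERIT += "testimage"
    TEST_SUITES = "ping ssh df date"
    TEST_QEMUBOOT_TIMEOUT = "1500"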
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
new file mode 100644
index 0000000000..92efbccddf
--- /dev/null
+++ b/meta/classes/texinfo.bbclass
@@ -0,0 +1,15 @@
1# This class is inherited by recipes whose upstream packages invoke the
2# texinfo utilities at build-time. Native and cross recipes are made to use the
3# dummy scripts provided by texinfo-dummy-native, for improved performance.
4# Target architecture recipes use the genuine Texinfo utilities. By default,
5# they use the Texinfo utilities on the host system. If you want to use the
6# Texinfo recipe shipped with Yocto, you can remove texinfo-native from
7# ASSUME_PROVIDED and makeinfo from SANITY_REQUIRED_UTILITIES.
8
9TEXDEP = "texinfo-native"
10TEXDEP_class-native = "texinfo-dummy-native"
11TEXDEP_class-cross = "texinfo-dummy-native"
12DEPENDS_append = " ${TEXDEP}"
13PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
14PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
15
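
A sketch of the opt-in described in the header comment, assuming a BitBake
version that supports the _remove override syntax:

    ASSUME_PROVIDED_remove = "texinfo-native"
    SANITY_REQUIRED_UTILITIES_remove = "makeinfo"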
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
new file mode 100644
index 0000000000..6984efd1be
--- /dev/null
+++ b/meta/classes/tinderclient.bbclass
@@ -0,0 +1,368 @@
1def tinder_http_post(server, selector, content_type, body):
2 import httplib
3 # now post it
4 for i in range(0,5):
5 try:
6 h = httplib.HTTP(server)
7 h.putrequest('POST', selector)
8 h.putheader('content-type', content_type)
9 h.putheader('content-length', str(len(body)))
10 h.endheaders()
11 h.send(body)
12 errcode, errmsg, headers = h.getreply()
13 #print errcode, errmsg, headers
14 return (errcode,errmsg, headers, h.file)
15 except:
16 print "Error sending the report!"
17 # try again
18 pass
19
20 # return some garbage
21 return (-1, "unknown", "unknown", None)
22
23def tinder_form_data(bound, dict, log):
24 output = []
25 # for each key in the dictionary
26 for name in dict:
27 assert dict[name]
28 output.append( "--" + bound )
29 output.append( 'Content-Disposition: form-data; name="%s"' % name )
30 output.append( "" )
31 output.append( dict[name] )
32 if log:
33 output.append( "--" + bound )
34 output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
35 output.append( '' )
36 output.append( log )
37 output.append( '--' + bound + '--' )
38 output.append( '' )
39
40 return "\r\n".join(output)
41
42def tinder_time_string():
43 """
44    Return the time as GMT (currently a stub that returns an empty string)
45 """
46 return ""
47
48def tinder_format_http_post(d,status,log):
49 """
50 Format the Tinderbox HTTP post with the data needed
51 for the tinderbox to be happy.
52 """
53
54 import random
55
56 # the variables we will need to send on this form post
57 variables = {
58 "tree" : d.getVar('TINDER_TREE', True),
59 "machine_name" : d.getVar('TINDER_MACHINE', True),
60 "os" : os.uname()[0],
61 "os_version" : os.uname()[2],
62 "compiler" : "gcc",
63 "clobber" : d.getVar('TINDER_CLOBBER', True) or "0",
64 "srcdate" : d.getVar('SRCDATE', True),
65 "PN" : d.getVar('PN', True),
66 "PV" : d.getVar('PV', True),
67 "PR" : d.getVar('PR', True),
68 "FILE" : d.getVar('FILE', True) or "N/A",
69 "TARGETARCH" : d.getVar('TARGET_ARCH', True),
70 "TARGETFPU" : d.getVar('TARGET_FPU', True) or "Unknown",
71 "TARGETOS" : d.getVar('TARGET_OS', True) or "Unknown",
72 "MACHINE" : d.getVar('MACHINE', True) or "Unknown",
73 "DISTRO" : d.getVar('DISTRO', True) or "Unknown",
74 "zecke-rocks" : "sure",
75 }
76
77 # optionally add the status
78 if status:
79 variables["status"] = str(status)
80
81 # try to load the machine id
82    # we only need it for build_status.pl, but always sending it
83    # does not hurt
84 try:
85 f = file(d.getVar('TMPDIR',True)+'/tinder-machine.id', 'r')
86 id = f.read()
87 variables['machine_id'] = id
88 except:
89 pass
90
91 # the boundary we will need
92 boundary = "----------------------------------%d" % int(random.random()*1000000000000)
93
94 # now format the body
95 body = tinder_form_data( boundary, variables, log )
96
97 return ("multipart/form-data; boundary=%s" % boundary),body
98
99
100def tinder_build_start(d):
101 """
102 Inform the tinderbox that a build is starting. We do this
103 by posting our name and tree to the build_start.pl script
104 on the server.
105 """
106
107 # get the body and type
108 content_type, body = tinder_format_http_post(d,None,None)
109 server = d.getVar('TINDER_HOST', True )
110 url = d.getVar('TINDER_URL', True )
111
112 selector = url + "/xml/build_start.pl"
113
114 #print "selector %s and url %s" % (selector, url)
115
116 # now post it
117 errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
118 #print errcode, errmsg, headers
119 report = h_file.read()
120
121 # now let us find the machine id that was assigned to us
122 search = "<machine id='"
123 report = report[report.find(search)+len(search):]
124 report = report[0:report.find("'")]
125
126 bb.note("Machine ID assigned by tinderbox: %s" % report )
127
128 # now we will need to save the machine number
129 # we will override any previous numbers
130 f = file(d.getVar('TMPDIR', True)+"/tinder-machine.id", 'w')
131 f.write(report)
132
133
134def tinder_send_http(d, status, _log):
135 """
136 Send this log as build status
137 """
138
139 # get the body and type
140 server = d.getVar('TINDER_HOST', True)
141 url = d.getVar('TINDER_URL', True)
142
143 selector = url + "/xml/build_status.pl"
144
145    # now post it - in chunks of 18,000 characters
146 new_log = _log
147 while len(new_log) > 0:
148 content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
149 errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
150 #print errcode, errmsg, headers
151 #print h.file.read()
152 new_log = new_log[18000:]
153
154
155def tinder_print_info(d):
156 """
157    Print the Tinderbox info,
158    including information about the base system and the tree
159    we use.
160 """
161
162 # get the local vars
163 time = tinder_time_string()
164 ops = os.uname()[0]
165 version = os.uname()[2]
166 url = d.getVar( 'TINDER_URL' , True )
167 tree = d.getVar( 'TINDER_TREE', True )
168 branch = d.getVar( 'TINDER_BRANCH', True )
169 srcdate = d.getVar( 'SRCDATE', True )
170 machine = d.getVar( 'MACHINE', True )
171 distro = d.getVar( 'DISTRO', True )
172 bbfiles = d.getVar( 'BBFILES', True )
173 tarch = d.getVar( 'TARGET_ARCH', True )
174 fpu = d.getVar( 'TARGET_FPU', True )
175 oerev = d.getVar( 'OE_REVISION', True ) or "unknown"
176
177    # there is a bug with triple-quoted strings;
178    # I will work around it here and will fix the original
179    # bug as well
180 output = []
181 output.append("== Tinderbox Info" )
182 output.append("Time: %(time)s" )
183 output.append("OS: %(ops)s" )
184 output.append("%(version)s" )
185 output.append("Compiler: gcc" )
186 output.append("Tinderbox Client: 0.1" )
187 output.append("Tinderbox Client Last Modified: yesterday" )
188 output.append("Tinderbox Protocol: 0.1" )
189 output.append("URL: %(url)s" )
190 output.append("Tree: %(tree)s" )
191 output.append("Config:" )
192 output.append("branch = '%(branch)s'" )
193 output.append("TARGET_ARCH = '%(tarch)s'" )
194 output.append("TARGET_FPU = '%(fpu)s'" )
195 output.append("SRCDATE = '%(srcdate)s'" )
196 output.append("MACHINE = '%(machine)s'" )
197 output.append("DISTRO = '%(distro)s'" )
198 output.append("BBFILES = '%(bbfiles)s'" )
199 output.append("OEREV = '%(oerev)s'" )
200 output.append("== End Tinderbox Client Info" )
201
202 # now create the real output
203 return "\n".join(output) % vars()
204
205
206def tinder_print_env():
207 """
208 Print the environment variables of this build
209 """
210 time_start = tinder_time_string()
211 time_end = tinder_time_string()
212
213 # build the environment
214 env = ""
215 for var in os.environ:
216 env += "%s=%s\n" % (var, os.environ[var])
217
218 output = []
219 output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
220 output.append( env )
221 output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
222
223 return "\n".join(output) % vars()
224
225def tinder_tinder_start(d, event):
226 """
227    Print the configuration of this build
228 """
229
230 time_start = tinder_time_string()
231 config = tinder_print_info(d)
232 #env = tinder_print_env()
233 time_end = tinder_time_string()
234 packages = " ".join( event.getPkgs() )
235
236 output = []
237 output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
238 output.append( config )
239 #output.append( env )
240 output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
241 output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
242 output.append( "<--- TINDERBOX STARTING BUILD NOW" )
243
244 output.append( "" )
245
246 return "\n".join(output) % vars()
247
248def tinder_do_tinder_report(event):
249 """
250 Report to the tinderbox:
251 On the BuildStart we will inform the box directly
252 On the other events we will write to the TINDER_LOG and
253 when the Task is finished we will send the report.
254
255 The above is not yet fully implemented. Currently we send
256 information immediately. The caching/queuing needs to be
257 implemented. Also sending more or less information is not
258 implemented yet.
259
260 We have two temporary files stored in the TMP directory. One file
261 contains the assigned machine id for the tinderclient. This id gets
262    assigned when we connect to the box and start the build process. The second
263    file is used to work around an EventHandler limitation. If BitBake is run
264    with the continue option we want the build to fail even if we get the
265 BuildCompleted Event. In this case we have to look up the status and
266 send it instead of 100/success.
267 """
268 import glob
269
270 # variables
271 name = bb.event.getName(event)
272 log = ""
273 status = 1
274    # Check what we need to do; Build* shows we start or are done
275 if name == "BuildStarted":
276 tinder_build_start(event.data)
277 log = tinder_tinder_start(event.data,event)
278
279 try:
280 # truncate the tinder log file
281 f = file(event.data.getVar('TINDER_LOG', True), 'w')
282 f.write("")
283 f.close()
284 except:
285 pass
286
287 try:
288 # write a status to the file. This is needed for the -k option
289 # of BitBake
290 g = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
291 g.write("")
292 g.close()
293 except IOError:
294 pass
295
296 # Append the Task-Log (compile,configure...) to the log file
297 # we will send to the server
298 if name == "TaskSucceeded" or name == "TaskFailed":
299 log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
300
301 if len(log_file) != 0:
302 to_file = event.data.getVar('TINDER_LOG', True)
303 log += "".join(open(log_file[0], 'r').readlines())
304
305 # set the right 'HEADER'/Summary for the TinderBox
306 if name == "TaskStarted":
307 log += "---> TINDERBOX Task %s started\n" % event.task
308 elif name == "TaskSucceeded":
309 log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
310 elif name == "TaskFailed":
311 log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
312 elif name == "PkgStarted":
313 log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF', True)
314 elif name == "PkgSucceeded":
315 log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF', True)
316 elif name == "PkgFailed":
317 if not event.data.getVar('TINDER_AUTOBUILD', True) == "0":
318 build.exec_task('do_clean', event.data)
319 log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF', True)
320 status = 200
321 # remember the failure for the -k case
322 h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
323 h.write("200")
324 elif name == "BuildCompleted":
325 log += "Build Completed\n"
326 status = 100
327 # Check if we have a old status...
328 try:
329 h = file(event.data.getVar('TMPDIR',True)+'/tinder-status', 'r')
330 status = int(h.read())
331 except:
332 pass
333
334 elif name == "MultipleProviders":
335 log += "---> TINDERBOX Multiple Providers\n"
336 log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
337 log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
338 log += "is runtime: %d\n" % event.isRuntime()
339 log += "<--- TINDERBOX Multiple Providers\n"
340 elif name == "NoProvider":
341 log += "Error: No Provider for: %s\n" % event.getItem()
342 log += "Error:Was Runtime: %d\n" % event.isRuntime()
343 status = 200
344 # remember the failure for the -k case
345 h = file(event.data.getVar('TMPDIR', True)+"/tinder-status", 'w')
346 h.write("200")
347
348 # now post the log
349 if len(log) == 0:
350 return
351
352 # for now we will use the http post method as it is the only one
353 log_post_method = tinder_send_http
354 log_post_method(event.data, status, log)
355
356
357# we want to be an event handler
358addhandler tinderclient_eventhandler
359python tinderclient_eventhandler() {
360 if e.data is None or bb.event.getName(e) == "MsgNote":
361 return
362
363 do_tinder_report = e.data.getVar('TINDER_REPORT', True)
364 if do_tinder_report and do_tinder_report == "1":
365 tinder_do_tinder_report(e)
366
367 return
368}
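
For reference, reporting is driven entirely by TINDER_* variables in local.conf;
the host, tree and machine values are illustrative only:

    INHERIT += "tinderclient"
    TINDER_REPORT = "1"
    TINDER_HOST = "tinderbox.example.org"
    TINDER_URL = "/tinderbox"
    TINDER_TREE = "oe-core"
    TINDER_BRANCH = "master"
    TINDER_MACHINE = "builder01"
    TINDER_LOG = "${TMPDIR}/tinder.log"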
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
new file mode 100644
index 0000000000..a7dd0aa854
--- /dev/null
+++ b/meta/classes/toaster.bbclass
@@ -0,0 +1,343 @@
1#
2# Toaster helper class
3#
4# Copyright (C) 2013 Intel Corporation
5#
6# Released under the MIT license (see COPYING.MIT)
7#
8# This bbclass is designed to extract data used by OE-Core during the build process,
9# for recording in the Toaster system.
10# The data access is synchronous, preserving the build data integrity across
11# different builds.
12#
13# The data is transferred through the event system, using the MetadataEvent objects.
14#
15# The model is to enable the datadump functions as postfuncs, and have the dump
16# executed after the real taskfunc has been executed. This prevents the task signature from
17# changing whether or not Toaster is enabled. Build performance is not affected if Toaster is not enabled.
18#
19# To enable, use INHERIT in local.conf:
20#
21# INHERIT += "toaster"
22#
23#
24#
25#
26
27# Find and dump layer info when we got the layers parsed
28
29
30
31python toaster_layerinfo_dumpdata() {
32 import subprocess
33
34 def _get_git_branch(layer_path):
35 branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
36 branch = branch.replace('refs/heads/', '').rstrip()
37 return branch
38
39 def _get_git_revision(layer_path):
40 revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
41 return revision
42
43 def _get_url_map_name(layer_name):
44        """ Some layers have a different name on the openembedded.org site;
45 this method returns the correct name to use in the URL
46 """
47
48 url_name = layer_name
49 url_mapping = {'meta': 'openembedded-core'}
50
51 for key in url_mapping.keys():
52 if key == layer_name:
53 url_name = url_mapping[key]
54
55 return url_name
56
57 def _get_layer_version_information(layer_path):
58
59 layer_version_info = {}
60 layer_version_info['branch'] = _get_git_branch(layer_path)
61 layer_version_info['commit'] = _get_git_revision(layer_path)
62 layer_version_info['priority'] = 0
63
64 return layer_version_info
65
66
67 def _get_layer_dict(layer_path):
68
69 layer_info = {}
70 layer_name = layer_path.split('/')[-1]
71 layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
72 layer_url_name = _get_url_map_name(layer_name)
73
74 layer_info['name'] = layer_url_name
75 layer_info['local_path'] = layer_path
76 layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
77 layer_info['version'] = _get_layer_version_information(layer_path)
78
79 return layer_info
80
81
82 bblayers = e.data.getVar("BBLAYERS", True)
83
84 llayerinfo = {}
85
86 for layer in { l for l in bblayers.strip().split(" ") if len(l) }:
87 llayerinfo[layer] = _get_layer_dict(layer)
88
89
90 bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data)
91}
92
93# Dump package file info data
94
95def _toaster_load_pkgdatafile(dirpath, filepath):
96 import json
97 import re
98 pkgdata = {}
99 with open(os.path.join(dirpath, filepath), "r") as fin:
100 for line in fin:
101 try:
102 kn, kv = line.strip().split(": ", 1)
103 m = re.match(r"^PKG_([^A-Z:]*)", kn)
104 if m:
105 pkgdata['OPKGN'] = m.group(1)
106 kn = "_".join([x for x in kn.split("_") if x.isupper()])
107 pkgdata[kn] = kv.strip()
108 if kn == 'FILES_INFO':
109 pkgdata[kn] = json.loads(kv)
110
111 except ValueError:
112 pass # ignore lines without valid key: value pairs
113 return pkgdata
114
115
116python toaster_package_dumpdata() {
117 """
118 Dumps the data created by emit_pkgdata
119 """
120 # replicate variables from the package.bbclass
121
122 packages = d.getVar('PACKAGES', True)
123 pkgdest = d.getVar('PKGDEST', True)
124
125 pkgdatadir = d.getVar('PKGDESTWORK', True)
126
127 # scan and send data for each package
128
129 lpkgdata = {}
130 for pkg in packages.split():
131
132 lpkgdata = _toaster_load_pkgdatafile(pkgdatadir + "/runtime/", pkg)
133
134 # Fire an event containing the pkg data
135 bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
136}
137
138# Dump output image file information
139
140python toaster_image_dumpdata() {
141 """
142 Image filename for output images is not standardized.
143 image_types.bbclass will spell out IMAGE_CMD_xxx variables that actually
144 have hardcoded ways to create image file names in them.
145 So we look for files starting with the set name.
146 """
147
148    deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE', True)
149    image_name = d.getVar('IMAGE_NAME', True)
150
151 image_info_data = {}
152
153 for dirpath, dirnames, filenames in os.walk(deploy_dir_image):
154 for fn in filenames:
155 if fn.startswith(image_name):
156 image_output = os.path.join(dirpath, fn)
157 image_info_data[image_output] = os.stat(image_output).st_size
158
159 bb.event.fire(bb.event.MetadataEvent("ImageFileSize",image_info_data), d)
160}
161
162
163
164# collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data
165
166python toaster_collect_task_stats() {
167 import bb.build
168 import bb.event
169 import bb.data
170 import bb.utils
171 import os
172
173 if not e.data.getVar('BUILDSTATS_BASE', True):
174 return # if we don't have buildstats, we cannot collect stats
175
176 def _append_read_list(v):
177 lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)
178
179 with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "a") as fout:
180 bn = get_bn(e)
181 bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
182 taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
183 fout.write("%s:%s:%s:%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))
184
185 bb.utils.unlockfile(lock)
186
187 def _read_stats(filename):
188 cpu_usage = 0
189 disk_io = 0
190 startio = '0'
191 endio = '0'
192 started = '0'
193 ended = '0'
194 pn = ''
195 taskname = ''
196 statinfo = {}
197
198 with open(filename, 'r') as task_bs:
199 for line in task_bs.readlines():
200 k,v = line.strip().split(": ", 1)
201 statinfo[k] = v
202
203 if "CPU usage" in statinfo:
204 cpu_usage = str(statinfo["CPU usage"]).strip('% \n\r')
205
206 if "EndTimeIO" in statinfo:
207 endio = str(statinfo["EndTimeIO"]).strip('% \n\r')
208
209 if "StartTimeIO" in statinfo:
210 startio = str(statinfo["StartTimeIO"]).strip('% \n\r')
211
212 if "Started" in statinfo:
213 started = str(statinfo["Started"]).strip('% \n\r')
214
215 if "Ended" in statinfo:
216 ended = str(statinfo["Ended"]).strip('% \n\r')
217
218 disk_io = int(endio) - int(startio)
219
220 elapsed_time = float(ended) - float(started)
221
222 cpu_usage = float(cpu_usage)
223
224 return {'cpu_usage': cpu_usage, 'disk_io': disk_io, 'elapsed_time': elapsed_time}
225
226
227 if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
228 _append_read_list(e)
229 pass
230
231
232 if isinstance(e, bb.event.BuildCompleted) and os.path.exists(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")):
233 events = []
234 with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "r") as fin:
235 for line in fin:
236 (taskfile, taskname, filename, recipename) = line.strip().split(":")
237 events.append((taskfile, taskname, _read_stats(filename), recipename))
238 bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
239 os.unlink(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"))
240}
241
242# dump relevant build history data as an event when the build is completed
243
244python toaster_buildhistory_dump() {
245 import re
246 BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
247 BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
248 pkgdata_dir = e.data.getVar("PKGDATA_DIR", True)
249
250
251 # scan the build targets for this build
252 images = {}
253 allpkgs = {}
254 files = {}
255 for target in e._pkgs:
256 installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
257 if os.path.exists(installed_img_path):
258 images[target] = {}
259 files[target] = {}
260 files[target]['dirs'] = []
261 files[target]['syms'] = []
262 files[target]['files'] = []
263 with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
264 for line in fin:
265 line = line.rstrip(";")
266 psize, px = line.split("\t")
267 punit, pname = px.split(" ")
268 # this size is "installed-size" as it measures how much space it takes on disk
269 images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
270
271 with open("%s/depends.dot" % installed_img_path, "r") as fin:
272 p = re.compile(r' -> ')
273 dot = re.compile(r'.*style=dotted')
274 for line in fin:
275 line = line.rstrip(';')
276 linesplit = p.split(line)
277 if len(linesplit) == 2:
278 pname = linesplit[0].rstrip('"').strip('"')
279 dependsname = linesplit[1].split(" ")[0].strip().strip(";").strip('"').rstrip('"')
280 deptype = "depends"
281 if dot.match(line):
282 deptype = "recommends"
283 if not pname in images[target]:
284 images[target][pname] = {'size': 0, 'depends' : []}
285 if not dependsname in images[target]:
286 images[target][dependsname] = {'size': 0, 'depends' : []}
287 images[target][pname]['depends'].append((dependsname, deptype))
288
289 with open("%s/files-in-image.txt" % installed_img_path, "r") as fin:
290 for line in fin:
291 lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
292 if lc[0].startswith("l"):
293 files[target]['syms'].append(lc)
294 elif lc[0].startswith("d"):
295 files[target]['dirs'].append(lc)
296 else:
297 files[target]['files'].append(lc)
298
299 for pname in images[target]:
300 if not pname in allpkgs:
301 try:
302 pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname)
303 except IOError as err:
304 if err.errno == 2:
305 # We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime
306 continue
307 else:
308 raise
309 allpkgs[pname] = pkgdata
310
311
312 data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files }
313
314 bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data)
315
316}
317
318# dump information related to license manifest path
319
320python toaster_licensemanifest_dump() {
321    deploy_dir = d.getVar('DEPLOY_DIR', True)
322    image_name = d.getVar('IMAGE_NAME', True)
323
324 data = { 'deploy_dir' : deploy_dir, 'image_name' : image_name }
325
326 bb.event.fire(bb.event.MetadataEvent("LicenseManifestPath", data), d)
327}
328
329# set event handlers
330addhandler toaster_layerinfo_dumpdata
331toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted"
332
333addhandler toaster_collect_task_stats
334toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed"
335
336addhandler toaster_buildhistory_dump
337toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
338do_package[postfuncs] += "toaster_package_dumpdata "
339do_package[vardepsexclude] += "toaster_package_dumpdata "
340
341do_rootfs[postfuncs] += "toaster_image_dumpdata "
342do_rootfs[postfuncs] += "toaster_licensemanifest_dump "
343do_rootfs[vardepsexclude] += "toaster_image_dumpdata toaster_licensemanifest_dump"
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
new file mode 100644
index 0000000000..75464d1317
--- /dev/null
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -0,0 +1,138 @@
1inherit siteinfo kernel-arch
2
3# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
4# doesn't always match our expectations... but we default to the stock value
5REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
6
7# This function creates an environment-setup-script for use in a deployable SDK
8toolchain_create_sdk_env_script () {
9 # Create environment setup script
10 libdir=${4:-${libdir}}
11 sysroot=${3:-${SDKTARGETSYSROOT}}
12 multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
13 script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
14 rm -f $script
15 touch $script
16 echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
17 EXTRAPATH=""
18 for i in ${CANADIANEXTRAOS}; do
19 EXTRAPATH="$EXTRAPATH:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_ARCH}${TARGET_VENDOR}-$i"
20 done
21 echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${TARGET_SYS}'$EXTRAPATH':$PATH' >> $script
22 echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
23 echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
24 echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
25 echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
26 echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
27 echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
28 echo 'export PYTHONHOME=${SDKPATHNATIVE}${prefix_nativesdk}' >> $script
29
30 toolchain_shared_env_script
31}
32
33# This function creates an environment-setup-script in the TMPDIR which enables
34# an OE-core IDE to integrate with the build tree
35toolchain_create_tree_env_script () {
36 script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
37 rm -f $script
38 touch $script
39 echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${PATH}' >> $script
40 echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
41 echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
42 echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
43 echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
44 echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
45 echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
46 echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
47
48 toolchain_shared_env_script
49}
50
51toolchain_shared_env_script () {
52 echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
53 echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
54 echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
55 echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
56 echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
57 echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
58 echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
59 echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
60 echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
61 echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
62 echo 'export AR=${TARGET_PREFIX}ar' >> $script
63 echo 'export NM=${TARGET_PREFIX}nm' >> $script
64 echo 'export M4=m4' >> $script
65 echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
66 echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
67 echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
68 echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
69 echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
70 echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
71 echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
72 echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
73 echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
74 echo 'export ARCH=${ARCH}' >> $script
75 echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
76
77 cat >> $script <<EOF
78
79# Append environment subscripts
80if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
81 for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
82 source \$envfile
83 done
84fi
85if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
86 for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
87 source \$envfile
88 done
89fi
90EOF
91}
92
93# We get the cached site config at runtime
94TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d, True)}"
95TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
96TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "${TCLIBC} ncurses"
97
98# This function creates a site config file
99toolchain_create_sdk_siteconfig () {
100 local siteconfig=$1
101
102 rm -f $siteconfig
103 touch $siteconfig
104
105 for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
106 cat $sitefile >> $siteconfig
107 done
108
109 #get cached site config
110 for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
111 if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
112 cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
113 fi
114 done
115}
116# The immediate expansion above can result in unwanted path dependencies here
117toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
118
119# This function creates a version information file
120toolchain_create_sdk_version () {
121 local versionfile=$1
122 rm -f $versionfile
123 touch $versionfile
124 echo 'Distro: ${DISTRO}' >> $versionfile
125 echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
126 echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
127 echo 'Timestamp: ${DATETIME}' >> $versionfile
128}
129toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
130
131python __anonymous () {
132 deps = ""
133 for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
134 deps += " %s:do_populate_sysroot" % dep
135 for variant in (d.getVar('MULTILIB_VARIANTS', True) or "").split():
136 deps += " %s-%s:do_populate_sysroot" % (variant, dep)
137 d.appendVarFlag('do_configure', 'depends', deps)
138}
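For context, an SDK user sources the generated environment-setup script before
cross-compiling. A minimal sketch of a session (the installation path and
target triplet are illustrative, not fixed by these functions):

    $ . /opt/poky/environment-setup-armv5te-poky-linux-gnueabi
    $ $CC -o hello hello.c

After sourcing, CC, CXX, PKG_CONFIG_PATH and the other variables exported
above all point into the SDK sysroots.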
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
new file mode 100644
index 0000000000..72da932232
--- /dev/null
+++ b/meta/classes/typecheck.bbclass
@@ -0,0 +1,12 @@
1# Check types of bitbake configuration variables
2#
3# See oe.types for details.
4
5python check_types() {
6 import oe.types
7 for key in e.data.keys():
8 if e.data.getVarFlag(key, "type"):
9 oe.data.typed_value(key, e.data)
10}
11addhandler check_types
12check_types[eventmask] = "bb.event.ConfigParsed"
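A variable opts into this check through its "type" varflag, which
oe.data.typed_value() then validates when the configuration is parsed. A
hypothetical local.conf fragment (the variable name is illustrative):

    MY_FEATURE_ENABLED = "yes"
    MY_FEATURE_ENABLED[type] = "boolean"

A value such as "maybe" would then fail the parse with a type error.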
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
new file mode 100644
index 0000000000..8ac1b71bc2
--- /dev/null
+++ b/meta/classes/uboot-config.bbclass
@@ -0,0 +1,61 @@
1# Handle U-Boot config for a machine
2#
3# The format to specify it, in the machine, is:
4#
5# UBOOT_CONFIG ??= <default>
6# UBOOT_CONFIG[foo] = "config,images"
7#
8# or
9#
10# UBOOT_MACHINE = "config"
11#
12# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
13
14python () {
15 ubootmachine = d.getVar("UBOOT_MACHINE", True)
16 ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
17 # The "doc" varflag is special, we don't want to see it here
18 ubootconfigflags.pop('doc', None)
19
20 if not ubootmachine and not ubootconfigflags:
21 PN = d.getVar("PN", True)
22 FILE = os.path.basename(d.getVar("FILE", True))
23 bb.debug(1, "To build %s, see %s for instructions on \
24 setting up your machine config" % (PN, FILE))
25 raise bb.parse.SkipPackage("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE", True))
26
27 if ubootmachine and ubootconfigflags:
28 raise bb.parse.SkipPackage("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
29
30 if not ubootconfigflags:
31 return
32
33 ubootconfig = (d.getVar('UBOOT_CONFIG', True) or "").split()
34 if len(ubootconfig) > 1:
35 raise bb.parse.SkipPackage('You can only have a single default for UBOOT_CONFIG.')
36 elif len(ubootconfig) == 0:
37 raise bb.parse.SkipPackage('You must set a default in UBOOT_CONFIG.')
38 ubootconfig = ubootconfig[0]
39
40 for f, v in ubootconfigflags.items():
41 if f == 'defaultval':
42 continue
43
44 items = v.split(',')
45 if items[0] and len(items) > 2:
46 raise bb.parse.SkipPackage('Only config,images can be specified!')
47
48 if ubootconfig == f:
49 bb.debug(1, "Setting UBOOT_MACHINE to %s." % items[0])
50 d.setVar('UBOOT_MACHINE', items[0])
51
52 # IMAGE_FSTYPES appending
53 if len(items) > 1 and items[1]:
54 bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
55 d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
56
57            # Break out as we found a match!
58 break
59 else:
60 raise bb.parse.SkipPackage("UBOOT_CONFIG %s is not supported" % ubootconfig)
61}
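A sketch of a machine configuration using the first form (the config and
image names are illustrative, in the style of an i.MX6 board):

    UBOOT_CONFIG ??= "sd"
    UBOOT_CONFIG[sd] = "mx6qsabresd_config,sdcard"
    UBOOT_CONFIG[mmc] = "mx6qsabresd_mmc_config"

With the default "sd" selected, the class sets UBOOT_MACHINE to
mx6qsabresd_config and appends "sdcard" to IMAGE_FSTYPES.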
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
new file mode 100644
index 0000000000..51391dbc4a
--- /dev/null
+++ b/meta/classes/uninative.bbclass
@@ -0,0 +1,44 @@
1NATIVELSBSTRING = "universal"
2
3UNINATIVE_LOADER = "${STAGING_DIR_NATIVE}/lib/ld-linux-x86-64.so.2"
4
5addhandler uninative_eventhandler
6uninative_eventhandler[eventmask] = "bb.event.BuildStarted"
7
8python uninative_eventhandler() {
9 loader = e.data.getVar("UNINATIVE_LOADER", True)
10 if not os.path.exists(loader):
11 import subprocess
12 cmd = e.data.expand("mkdir -p ${STAGING_DIR}; cd ${STAGING_DIR}; tar -xjf ${COREBASE}/${BUILD_ARCH}-nativesdk-libc.tar.bz2; ${STAGING_DIR}/relocate_sdk.py ${STAGING_DIR_NATIVE} ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_BINDIR_NATIVE}/patchelf-uninative")
13 #bb.warn("nativesdk lib extraction: " + cmd)
14 subprocess.check_call(cmd, shell=True)
15}
16
17SSTATEPOSTUNPACKFUNCS_append = " uninative_changeinterp"
18
19python uninative_changeinterp () {
20 import subprocess
21 import stat
22 import oe.qa
23
24 if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
25 return
26
27 sstateinst = d.getVar('SSTATE_INSTDIR', True)
28 for walkroot, dirs, files in os.walk(sstateinst):
29 for file in files:
30 f = os.path.join(walkroot, file)
31 if os.path.islink(f):
32 continue
33 s = os.stat(f)
34 if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
35 continue
36 elf = oe.qa.ELFFile(f)
37 try:
38 elf.open()
39 except:
40 continue
41
42 #bb.warn("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f))
43 subprocess.call("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f), shell=True)
44}
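A sketch of how this class would be enabled, assuming the prebuilt nativesdk
libc tarball referenced by the event handler has been produced separately and
placed where the handler expects it:

    # local.conf
    INHERIT += "uninative"
    # requires ${COREBASE}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 to exist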
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
new file mode 100644
index 0000000000..9f2c250d03
--- /dev/null
+++ b/meta/classes/update-alternatives.bbclass
@@ -0,0 +1,267 @@
1# This class is used to help the alternatives system which is useful when
2# multiple sources provide the same command. You can use the update-alternatives
3# command directly in your recipe, but in most cases this class simplifies
4# that job.
5#
6# To use this class a number of variables should be defined:
7#
8# List all of the alternatives needed by a package:
9# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
10#
11# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
12#
13# The pathname of the link
14# ALTERNATIVE_LINK_NAME[name] = "target"
15#
16# This is the name of the binary once it's been installed onto the runtime.
17# This name is global to all split packages in this recipe, and should match
18# other recipes with the same functionality.
19# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
20#
21# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
22#
23# The default link to create for all targets
24# ALTERNATIVE_TARGET = "target"
25#
26# This is useful in a multicall binary case
27# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
28#
29# A non-default link to create for a target
30# ALTERNATIVE_TARGET[name] = "target"
31#
32# This is the name of the binary as it has been installed by do_install
33# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
34#
35# A package specific link for a target
36# ALTERNATIVE_TARGET_<pkg>[name] = "target"
37#
38# This is useful when a recipe provides multiple alternatives for the
39# same item.
40#
41# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
42# from ALTERNATIVE_LINK_NAME.
43#
44# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
45# ALTERNATIVE_TARGET will have '.{BPN}' appended to it. If the file
46# referenced has not been renamed, it will also be renamed. (This avoids
47# the need to rename alternative files in the do_install step, but still
48# supports it if necessary for some reason.)
49#
50# The default priority for any alternatives
51# ALTERNATIVE_PRIORITY = "priority"
52#
53# i.e. default is ALTERNATIVE_PRIORITY = "10"
54#
55# The non-default priority for a specific target
56# ALTERNATIVE_PRIORITY[name] = "priority"
57#
58# The package priority for a specific target
59# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
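#
# A minimal illustrative example (hypothetical values, in the style of a
# multicall busybox recipe) tying these variables together:
#
#   ALTERNATIVE_${PN} = "vi"
#   ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
#   ALTERNATIVE_TARGET[vi] = "${base_bindir}/busybox"
#   ALTERNATIVE_PRIORITY[vi] = "50"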
60
61ALTERNATIVE_PRIORITY = "10"
62
63# We need special processing for vardeps because it cannot work on
64# modified flag values. So we aggregate the flags into a new variable
65# and include that variable in the set.
66UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
67
68def gen_updatealternativesvardeps(d):
69 pkgs = (d.getVar("PACKAGES", True) or "").split()
70 vars = (d.getVar("UPDALTVARS", True) or "").split()
71
72 # First compute them for non_pkg versions
73 for v in vars:
74 for flag in (d.getVarFlags(v) or {}):
75 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
76 continue
77 d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
78
79 for p in pkgs:
80 for v in vars:
81 for flag in (d.getVarFlags("%s_%s" % (v,p)) or {}):
82 if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
83 continue
84 d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
85
86def ua_extend_depends(d):
87 if not 'virtual/update-alternatives' in d.getVar('PROVIDES', True):
88 d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
89
90python __anonymous() {
91 # Update Alternatives only works on target packages...
92 if bb.data.inherits_class('native', d) or \
93 bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
94 bb.data.inherits_class('cross-canadian', d):
95 return
96
97 # compute special vardeps
98 gen_updatealternativesvardeps(d)
99
100 # extend the depends to include virtual/update-alternatives
101 ua_extend_depends(d)
102}
103
104def gen_updatealternativesvars(d):
105 ret = []
106 pkgs = (d.getVar("PACKAGES", True) or "").split()
107 vars = (d.getVar("UPDALTVARS", True) or "").split()
108
109 for v in vars:
110 ret.append(v + "_VARDEPS")
111
112 for p in pkgs:
113 for v in vars:
114 ret.append(v + "_" + p)
115 ret.append(v + "_VARDEPS_" + p)
116 return " ".join(ret)
117
118# Now the new stuff: we use a custom function to generate the right values
119populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
120
121# We need to do the rename after the image creation step, but before
122# the split and strip steps; packagecopy seems to be the earliest reasonable
123# place.
124python perform_packagecopy_append () {
125 # Check for deprecated usage...
126 pn = d.getVar('BPN', True)
127 if d.getVar('ALTERNATIVE_LINKS', True) != None:
128 bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
129
130 # Do actual update alternatives processing
131 pkgdest = d.getVar('PKGD', True)
132 for pkg in (d.getVar('PACKAGES', True) or "").split():
133 # If the src == dest, we know we need to rename the dest by appending ${BPN}
134 link_rename = {}
135 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
136 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
137 if not alt_link:
138 alt_link = "%s/%s" % (d.getVar('bindir', True), alt_name)
139 d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
140
141 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
142 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
143 # Sometimes alt_target is specified as relative to the link name.
144 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
145
146 # If the link and target are the same name, we need to rename the target.
147 if alt_link == alt_target:
148 src = '%s/%s' % (pkgdest, alt_target)
149 alt_target_rename = '%s.%s' % (alt_target, pn)
150 dest = '%s/%s' % (pkgdest, alt_target_rename)
151 if os.path.lexists(dest):
152 bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
153 elif os.path.lexists(src):
154 if os.path.islink(src):
155 # Delay rename of links
156 link_rename[alt_target] = alt_target_rename
157 else:
158 bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
159 os.rename(src, dest)
160 else:
161 bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
162 continue
163 d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
164
165 # Process delayed link names
166 # Do these after other renames so we can correct broken links
167 for alt_target in link_rename:
168 src = '%s/%s' % (pkgdest, alt_target)
169 dest = '%s/%s' % (pkgdest, link_rename[alt_target])
170 link = os.readlink(src)
171 link_target = oe.path.realpath(src, pkgdest, True)
172
173 if os.path.lexists(link_target):
174 # Ok, the link_target exists, we can rename
175 bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target]))
176 os.rename(src, dest)
177 else:
178 # Try to resolve the broken link to link.${BPN}
179 link_maybe = '%s.%s' % (os.readlink(src), pn)
180 if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
181 # Ok, the renamed link target exists.. create a new link, and remove the original
182 bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe))
183 os.symlink(link_maybe, dest)
184 os.unlink(src)
185 else:
186 bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
187}
188
189PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
190
191python populate_packages_updatealternatives () {
192 pn = d.getVar('BPN', True)
193
194 # Do actual update alternatives processing
195 pkgdest = d.getVar('PKGD', True)
196 for pkg in (d.getVar('PACKAGES', True) or "").split():
197 # Create post install/removal scripts
198 alt_setup_links = ""
199 alt_remove_links = ""
200 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
201 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
202 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
203 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
204 # Sometimes alt_target is specified as relative to the link name.
205 alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
206
207 alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name, True)
208 alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg, True) or d.getVar('ALTERNATIVE_PRIORITY', True)
209
210 # This shouldn't trigger, as it should have been resolved earlier!
211 if alt_link == alt_target:
212 bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
213 alt_target = '%s.%s' % (alt_target, pn)
214
215 if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
216 bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
217 continue
218
219            # Default to generating a shell script; eventually we may want to change this...
220 alt_target = os.path.normpath(alt_target)
221
222 alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
223 alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
224
225 if alt_setup_links:
226 # RDEPENDS setup
227 provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True)
228 if provider:
229 #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
230 d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX') + provider)
231
232 bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg)
233 bb.note('%s' % alt_setup_links)
234 postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n'
235 postinst += alt_setup_links
236 d.setVar('pkg_postinst_%s' % pkg, postinst)
237
238 bb.note('%s' % alt_remove_links)
239 postrm = d.getVar('pkg_postrm_%s' % pkg, True) or '#!/bin/sh\n'
240 postrm += alt_remove_links
241 d.setVar('pkg_postrm_%s' % pkg, postrm)
242}
243
244python package_do_filedeps_append () {
245 pn = d.getVar('BPN', True)
246 pkgdest = d.getVar('PKGDEST', True)
247
248 for pkg in packages.split():
249 for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split():
250 alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True)
251 alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True)
252 alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
253
254 if alt_link == alt_target:
255 bb.warn('alt_link == alt_target: %s == %s' % (alt_link, alt_target))
256 alt_target = '%s.%s' % (alt_target, pn)
257
258 if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
259 continue
260
261 # Add file provide
262 trans_target = oe.package.file_translate(alt_target)
263 d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
264 if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg, True) or ""):
265 d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
266}
267
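For the hypothetical values sketched in the header comment above, the
generated maintainer scripts reduce to plain update-alternatives calls,
roughly:

    # pkg_postinst
    	update-alternatives --install /bin/vi vi /bin/busybox 50
    # pkg_postrm
    	update-alternatives --remove vi /bin/busybox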
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
new file mode 100644
index 0000000000..bc1aa7dad6
--- /dev/null
+++ b/meta/classes/update-rc.d.bbclass
@@ -0,0 +1,135 @@
1UPDATERCPN ?= "${PN}"
2
3DEPENDS_append = " update-rc.d-native"
4VIRTUAL-RUNTIME_initscripts ?= "initscripts"
5DEPENDS_append_class-target = " ${VIRTUAL-RUNTIME_initscripts}"
6UPDATERCD = "update-rc.d"
7UPDATERCD_class-cross = ""
8UPDATERCD_class-native = ""
9UPDATERCD_class-nativesdk = ""
10
11RRECOMMENDS_${UPDATERCPN}_append = " ${UPDATERCD}"
12
13INITSCRIPT_PARAMS ?= "defaults"
14
15INIT_D_DIR = "${sysconfdir}/init.d"
16
17updatercd_preinst() {
18if [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
19 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
20fi
21if type update-rc.d >/dev/null 2>/dev/null; then
22 if [ -n "$D" ]; then
23 OPT="-f -r $D"
24 else
25 OPT="-f"
26 fi
27 update-rc.d $OPT ${INITSCRIPT_NAME} remove
28fi
29}
30
31updatercd_postinst() {
32if type update-rc.d >/dev/null 2>/dev/null; then
33 if [ -n "$D" ]; then
34 OPT="-r $D"
35 else
36 OPT="-s"
37 fi
38 update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
39fi
40}
41
42updatercd_prerm() {
43if [ -z "$D" ]; then
44 ${INIT_D_DIR}/${INITSCRIPT_NAME} stop
45fi
46}
47
48updatercd_postrm() {
49if type update-rc.d >/dev/null 2>/dev/null; then
50 if [ -n "$D" ]; then
51 OPT="-r $D"
52 else
53 OPT=""
54 fi
55 update-rc.d $OPT ${INITSCRIPT_NAME} remove
56fi
57}
58
59
60def update_rc_after_parse(d):
61 if d.getVar('INITSCRIPT_PACKAGES') == None:
62 if d.getVar('INITSCRIPT_NAME') == None:
63 raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE'))
64 if d.getVar('INITSCRIPT_PARAMS') == None:
65 raise bb.build.FuncFailed("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE'))
66
67python __anonymous() {
68 update_rc_after_parse(d)
69}
70
71PACKAGESPLITFUNCS_prepend = "populate_packages_updatercd "
72PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
73
74populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
75populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
76
77python populate_packages_updatercd () {
78 def update_rcd_auto_depend(pkg):
79 import subprocess
80 import os
81 path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
82 if not os.path.exists(path):
83 return
84 statement = "grep -q -w '/etc/init.d/functions' %s" % path
85 if subprocess.call(statement, shell=True) == 0:
86 mlprefix = d.getVar('MLPREFIX', True) or ""
87 d.appendVar('RDEPENDS_' + pkg, ' %sinitscripts-functions' % (mlprefix))
88
89 def update_rcd_package(pkg):
90 bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
91
92 localdata = bb.data.createCopy(d)
93 overrides = localdata.getVar("OVERRIDES", True)
94 localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
95 bb.data.update_data(localdata)
96
97 update_rcd_auto_depend(pkg)
98
99 preinst = d.getVar('pkg_preinst_%s' % pkg, True)
100 if not preinst:
101 preinst = '#!/bin/sh\n'
102 preinst += localdata.getVar('updatercd_preinst', True)
103 d.setVar('pkg_preinst_%s' % pkg, preinst)
104
105 postinst = d.getVar('pkg_postinst_%s' % pkg, True)
106 if not postinst:
107 postinst = '#!/bin/sh\n'
108 postinst += localdata.getVar('updatercd_postinst', True)
109 d.setVar('pkg_postinst_%s' % pkg, postinst)
110
111 prerm = d.getVar('pkg_prerm_%s' % pkg, True)
112 if not prerm:
113 prerm = '#!/bin/sh\n'
114 prerm += localdata.getVar('updatercd_prerm', True)
115 d.setVar('pkg_prerm_%s' % pkg, prerm)
116
117 postrm = d.getVar('pkg_postrm_%s' % pkg, True)
118 if not postrm:
119 postrm = '#!/bin/sh\n'
120 postrm += localdata.getVar('updatercd_postrm', True)
121 d.setVar('pkg_postrm_%s' % pkg, postrm)
122
123 # Check that this class isn't being inhibited (generally, by
124 # systemd.bbclass) before doing any work.
125 if bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) or \
126 not d.getVar("INHIBIT_UPDATERCD_BBCLASS", True):
127 pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
128 if pkgs == None:
129 pkgs = d.getVar('UPDATERCPN', True)
130 packages = (d.getVar('PACKAGES', True) or "").split()
131 if not pkgs in packages and packages != []:
132 pkgs = packages[0]
133 for pkg in pkgs.split():
134 update_rcd_package(pkg)
135}
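A sketch of a recipe using this class (the script name and start/stop
sequence numbers are illustrative):

    inherit update-rc.d
    INITSCRIPT_PACKAGES = "${PN}"
    INITSCRIPT_NAME = "my-daemon"
    INITSCRIPT_PARAMS = "defaults 90 10"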
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
new file mode 100644
index 0000000000..421a70a6ab
--- /dev/null
+++ b/meta/classes/useradd-staticids.bbclass
@@ -0,0 +1,276 @@
1# In order to support a deterministic set of 'dynamic' users/groups,
2# we need a function to reformat the params based on a static file
3def update_useradd_static_config(d):
4 import argparse
5 import re
6
7 class myArgumentParser( argparse.ArgumentParser ):
8 def _print_message(self, message, file=None):
9 bb.warn("%s - %s: %s" % (d.getVar('PN', True), pkg, message))
10
11 # This should never be called...
12 def exit(self, status=0, message=None):
13 message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN', True), pkg))
14            self.error(message)
15
16 def error(self, message):
17 raise bb.build.FuncFailed(message)
18
19 # We parse and rewrite the useradd components
20 def rewrite_useradd(params):
21 # The following comes from --help on useradd from shadow
22 parser = myArgumentParser(prog='useradd')
23 parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
24 parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
25 parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
26 parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
27 parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
28 parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
29 parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
30 parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
31 parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
32 parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
33 parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
34 parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_true")
35 parser.add_argument("-M", "--no-create-home", help="do not create the user's home directory", action="store_true")
36 parser.add_argument("-N", "--no-user-group", help="do not create a group with the same name as the user", action="store_true")
37 parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
38 parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
39 parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
40 parser.add_argument("-r", "--system", help="create a system account", action="store_true")
41 parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
42 parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
43 parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_true")
44 parser.add_argument("LOGIN", help="Login name of the new user")
45
46 # Return a list of configuration files based on either the default
47 # files/passwd or the contents of USERADD_UID_TABLES
48    # paths are resolved via BBPATH
49 def get_passwd_list(d):
50 str = ""
51 bbpath = d.getVar('BBPATH', True)
52 passwd_tables = d.getVar('USERADD_UID_TABLES', True)
53 if not passwd_tables:
54 passwd_tables = 'files/passwd'
55 for conf_file in passwd_tables.split():
56 str += " %s" % bb.utils.which(bbpath, conf_file)
57 return str
58
59 newparams = []
60 for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
61 param = param.strip()
62 if not param:
63 continue
64 try:
65 uaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
66 except:
67 raise bb.build.FuncFailed("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
68
69            # Read all passwd files specified in USERADD_UID_TABLES or files/passwd
70 # Use the standard passwd layout:
71 # username:password:user_id:group_id:comment:home_directory:login_shell
72 # (we want to process in reverse order, as 'last found' in the list wins)
73 #
74 # If a field is left blank, the original value will be used. The 'username'
75 # field is required.
76 #
77 # Note: we ignore the password field, as including even the hashed password
78 # in the useradd command may introduce a security hole. It's assumed that
79 # all new users get the default ('*' which prevents login) until the user is
80 # specifically configured by the system admin.
81 for conf in get_passwd_list(d).split()[::-1]:
82 if os.path.exists(conf):
83 f = open(conf, "r")
84 for line in f:
85 if line.startswith('#'):
86 continue
87 field = line.rstrip().split(":")
88 if field[0] == uaargs.LOGIN:
89 if uaargs.uid and field[2] and (uaargs.uid != field[2]):
90 bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
91 uaargs.uid = [field[2], uaargs.uid][not field[2]]
92
93 # Determine the possible groupname
94 # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
95 #
96 # By default the system has creation of the matching groups enabled
97 # So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
98 # is used, and we disable the user_group option.
99 #
100 uaargs.groupname = [uaargs.gid, uaargs.LOGIN][not uaargs.gid or uaargs.user_group]
101 uaargs.groupid = [uaargs.gid, uaargs.groupname][not uaargs.gid]
102 uaargs.groupid = [field[3], uaargs.groupid][not field[3]]
103
104 if not uaargs.gid or uaargs.gid != uaargs.groupid:
105 if (uaargs.groupid and uaargs.groupid.isdigit()) and (uaargs.groupname and uaargs.groupname.isdigit()) and (uaargs.groupid != uaargs.groupname):
106                            # We want to add a group, but we don't know its name... so we can't add the group...
107 # We have to assume the group has previously been added or we'll fail on the adduser...
108 # Note: specifying the actual gid is very rare in OE, usually the group name is specified.
109 bb.warn("%s: Changing gid for login %s from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupname, uaargs.gid))
110 elif (uaargs.groupid and not uaargs.groupid.isdigit()) and uaargs.groupid == uaargs.groupname:
111 # We don't have a number, so we have to add a name
112 bb.debug(1, "Adding group %s!" % (uaargs.groupname))
113 uaargs.gid = uaargs.groupid
114 uaargs.user_group = False
115 groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
116 newgroup = "%s %s" % (['', ' --system'][uaargs.system], uaargs.groupname)
117 if groupadd:
118 d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup))
119 else:
120 d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
121 elif uaargs.groupname and (uaargs.groupid and uaargs.groupid.isdigit()):
122 # We have a group name and a group number to assign it to
123 bb.debug(1, "Adding group %s gid (%s)!" % (uaargs.groupname, uaargs.groupid))
124 uaargs.gid = uaargs.groupid
125 uaargs.user_group = False
126 groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
127 newgroup = "-g %s %s" % (uaargs.gid, uaargs.groupname)
128 if groupadd:
129 d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup))
130 else:
131 d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
132
133 uaargs.comment = ["'%s'" % field[4], uaargs.comment][not field[4]]
134 uaargs.home_dir = [field[5], uaargs.home_dir][not field[5]]
135 uaargs.shell = [field[6], uaargs.shell][not field[6]]
136 break
137
138 # Should be an error if a specific option is set...
139 if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not ((uaargs.uid and uaargs.uid.isdigit()) and uaargs.gid):
140 #bb.error("Skipping recipe %s, package %s which adds username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
141 raise bb.build.FuncFailed("%s - %s: Username %s does not have a static uid defined." % (d.getVar('PN', True), pkg, uaargs.LOGIN))
142
143 # Reconstruct the args...
144 newparam = ['', ' --defaults'][uaargs.defaults]
145 newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
146 newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
147 newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
148            newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
149 newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
150 newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
151 newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
152 newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None]
153 newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None]
154 newparam += ['', ' --no-log-init'][uaargs.no_log_init]
155 newparam += ['', ' --create-home'][uaargs.create_home]
156 newparam += ['', ' --no-create-home'][uaargs.no_create_home]
157 newparam += ['', ' --no-user-group'][uaargs.no_user_group]
158 newparam += ['', ' --non-unique'][uaargs.non_unique]
159 newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
160 newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
161 newparam += ['', ' --system'][uaargs.system]
162 newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
163 newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None]
164 newparam += ['', ' --user-group'][uaargs.user_group]
165 newparam += ' %s' % uaargs.LOGIN
166
167 newparams.append(newparam)
168
169 return " ;".join(newparams).strip()
170
171 # We parse and rewrite the groupadd components
172 def rewrite_groupadd(params):
173 # The following comes from --help on groupadd from shadow
174 parser = myArgumentParser(prog='groupadd')
175 parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
176 parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
177 parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
178 parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
179 parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
180 parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
181 parser.add_argument("-r", "--system", help="create a system account", action="store_true")
182 parser.add_argument("GROUP", help="Group name of the new group")
183
184 # Return a list of configuration files based on either the default
185 # files/group or the contents of USERADD_GID_TABLES
186    # paths are resolved via BBPATH
187 def get_group_list(d):
188 str = ""
189 bbpath = d.getVar('BBPATH', True)
190 group_tables = d.getVar('USERADD_GID_TABLES', True)
191 if not group_tables:
192 group_tables = 'files/group'
193 for conf_file in group_tables.split():
194 str += " %s" % bb.utils.which(bbpath, conf_file)
195 return str
196
197 newparams = []
198 for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
199 param = param.strip()
200 if not param:
201 continue
202 try:
203 # If we're processing multiple lines, we could have left over values here...
204 gaargs = parser.parse_args(re.split('''[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', param))
205 except:
206 raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
207
208 # Need to iterate over layers and open the right file(s)
209 # Use the standard group layout:
210 # groupname:password:group_id:group_members
211 #
212 # If a field is left blank, the original value will be used. The 'groupname' field
213 # is required.
214 #
215    # Note: similar to the passwd file, the 'password' field is ignored
216 # Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
217 for conf in get_group_list(d).split()[::-1]:
218 if os.path.exists(conf):
219 f = open(conf, "r")
220 for line in f:
221 if line.startswith('#'):
222 continue
223 field = line.rstrip().split(":")
224 if field[0] == gaargs.GROUP and field[2]:
225 if gaargs.gid and (gaargs.gid != field[2]):
226 bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
227 gaargs.gid = field[2]
228 break
229
230 if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not (gaargs.gid and gaargs.gid.isdigit()):
231 #bb.error("Skipping recipe %s, package %s which adds groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
232 raise bb.build.FuncFailed("%s - %s: Groupname %s does not have a static gid defined." % (d.getVar('PN', True), pkg, gaargs.GROUP))
233
234 # Reconstruct the args...
235 newparam = ['', ' --force'][gaargs.force]
236 newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
237 newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
238 newparam += ['', ' --non-unique'][gaargs.non_unique]
239 newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
240 newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
241 newparam += ['', ' --system'][gaargs.system]
242 newparam += ' %s' % gaargs.GROUP
243
244 newparams.append(newparam)
245
246 return " ;".join(newparams).strip()
247
248    # Load and process the users and groups, rewriting the useradd/groupadd params
249 useradd_packages = d.getVar('USERADD_PACKAGES', True)
250
251 for pkg in useradd_packages.split():
252 # Groupmems doesn't have anything we might want to change, so simply validating
253 # is a bit of a waste -- only process useradd/groupadd
254 useradd_param = d.getVar('USERADD_PARAM_%s' % pkg, True)
255 if useradd_param:
256 #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
257 d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param))
258 #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg, True)))
259
260 groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg, True)
261 if groupadd_param:
262 #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
263 d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param))
264 #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg, True)))
265
266
267
268python __anonymous() {
269 if not bb.data.inherits_class('nativesdk', d) \
270 and not bb.data.inherits_class('native', d):
271 try:
272 update_useradd_static_config(d)
273 except bb.build.FuncFailed as f:
274 bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN', True), f))
275 raise bb.parse.SkipPackage(f)
276}
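A sketch of how a distro might enable static IDs (the table file names are
the defaults used above; the passwd entry is hypothetical):

    USERADD_ERROR_DYNAMIC = "1"
    USERADD_UID_TABLES = "files/passwd"
    USERADD_GID_TABLES = "files/group"

    # files/passwd entry, standard layout; blank fields keep recipe values:
    myuser:*:1200:1200::/home/myuser:/bin/sh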
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
new file mode 100644
index 0000000000..0b9a843b24
--- /dev/null
+++ b/meta/classes/useradd.bbclass
@@ -0,0 +1,213 @@
1inherit useradd_base
2
3# base-passwd-cross provides the default passwd and group files in the
4# target sysroot, and shadow -native and -sysroot provide the utilities
5# and support files needed to add and modify user and group accounts
6DEPENDS_append = "${USERADDDEPENDS}"
7USERADDDEPENDS = " base-files base-passwd shadow-native shadow-sysroot shadow"
8USERADDDEPENDS_class-cross = ""
9USERADDDEPENDS_class-native = ""
10USERADDDEPENDS_class-nativesdk = ""
11
12# This preinstall function can be run in four different contexts:
13#
14# a) Before do_install
15# b) At do_populate_sysroot_setscene when installing from sstate packages
16# c) As the preinst script in the target package at do_rootfs time
17# d) As the preinst script in the target package on the device during a package upgrade
18#
19useradd_preinst () {
20OPT=""
21SYSROOT=""
22
23if test "x$D" != "x"; then
24 # Installing into a sysroot
25 SYSROOT="$D"
26 OPT="--root $D"
27 # user/group lookups should match useradd/groupadd --root
28 export PSEUDO_PASSWD="$SYSROOT:${STAGING_DIR_NATIVE}"
29fi
30
31# If we're not doing a special SSTATE/SYSROOT install
32# then set the values, otherwise use the environment
33if test "x$UA_SYSROOT" = "x"; then
34 # Installing onto a target
35 # Add groups and users defined only for this package
36 GROUPADD_PARAM="${GROUPADD_PARAM}"
37 USERADD_PARAM="${USERADD_PARAM}"
38 GROUPMEMS_PARAM="${GROUPMEMS_PARAM}"
39fi
40
41# Perform group additions first, since user additions may depend
42# on these groups existing
43if test "x$GROUPADD_PARAM" != "x"; then
44 echo "Running groupadd commands..."
45 # Invoke multiple instances of groupadd for parameter lists
46 # separated by ';'
47 opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
48 remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
49 while test "x$opts" != "x"; do
50 perform_groupadd "$SYSROOT" "$OPT $opts" 10
51 if test "x$opts" = "x$remaining"; then
52 break
53 fi
54 opts=`echo "$remaining" | cut -d ';' -f 1`
55 remaining=`echo "$remaining" | cut -d ';' -f 2-`
56 done
57fi
58
59if test "x$USERADD_PARAM" != "x"; then
60 echo "Running useradd commands..."
61 # Invoke multiple instances of useradd for parameter lists
62 # separated by ';'
63 opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
64 remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
65 while test "x$opts" != "x"; do
66 perform_useradd "$SYSROOT" "$OPT $opts" 10
67 if test "x$opts" = "x$remaining"; then
68 break
69 fi
70 opts=`echo "$remaining" | cut -d ';' -f 1`
71 remaining=`echo "$remaining" | cut -d ';' -f 2-`
72 done
73fi
74
75if test "x$GROUPMEMS_PARAM" != "x"; then
76 echo "Running groupmems commands..."
77 # Invoke multiple instances of groupmems for parameter lists
78 # separated by ';'
79 opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
80 remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
81 while test "x$opts" != "x"; do
82 perform_groupmems "$SYSROOT" "$OPT $opts" 10
83 if test "x$opts" = "x$remaining"; then
84 break
85 fi
86 opts=`echo "$remaining" | cut -d ';' -f 1`
87 remaining=`echo "$remaining" | cut -d ';' -f 2-`
88 done
89fi
90}
91
92useradd_sysroot () {
93 # Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
94 # at this point so we're explicit about the environment so pseudo can load if
95 # not already present.
96 export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
97
98 # Explicitly set $D since it isn't set to anything
99 # before do_install
100 D=${STAGING_DIR_TARGET}
101
102 # Add groups and users defined for all recipe packages
103 GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
104 USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
105 GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
106
107 # Tell the system to use the environment vars
108 UA_SYSROOT=1
109
110 useradd_preinst
111}
112
113useradd_sysroot_sstate () {
114 if [ "${BB_CURRENTTASK}" = "package_setscene" -o "${BB_CURRENTTASK}" = "populate_sysroot_setscene" ]
115 then
116 useradd_sysroot
117 fi
118}
119
120do_install[prefuncs] += "${SYSROOTFUNC}"
121SYSROOTFUNC = "useradd_sysroot"
122SYSROOTFUNC_class-cross = ""
123SYSROOTFUNC_class-native = ""
124SYSROOTFUNC_class-nativesdk = ""
125SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}"
126SYSROOTPOSTFUNC = "useradd_sysroot_sstate"
127SYSROOTPOSTFUNC_class-cross = ""
128SYSROOTPOSTFUNC_class-native = ""
129SYSROOTPOSTFUNC_class-nativesdk = ""
130
131USERADDSETSCENEDEPS = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
132USERADDSETSCENEDEPS_class-cross = ""
133USERADDSETSCENEDEPS_class-native = ""
134USERADDSETSCENEDEPS_class-nativesdk = ""
135do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
136do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
137
138# Recipe parse-time sanity checks
139def update_useradd_after_parse(d):
140 useradd_packages = d.getVar('USERADD_PACKAGES', True)
141
142 if not useradd_packages:
143 raise bb.build.FuncFailed("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE'))
144
145 for pkg in useradd_packages.split():
146 if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg, True):
147 bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE'), pkg))
148
149python __anonymous() {
150 if not bb.data.inherits_class('nativesdk', d) \
151 and not bb.data.inherits_class('native', d):
152 update_useradd_after_parse(d)
153}
154
155# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
156# [group|user]add parameters for all USERADD_PACKAGES in this recipe
157def get_all_cmd_params(d, cmd_type):
158 import string
159
160 param_type = cmd_type.upper() + "_PARAM_%s"
161 params = []
162
163 useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
164 for pkg in useradd_packages.split():
165 param = d.getVar(param_type % pkg, True)
166 if param:
167 params.append(param)
168
169 return "; ".join(params)
170
171# Adds the preinst script into generated packages
172fakeroot python populate_packages_prepend () {
173 def update_useradd_package(pkg):
174 bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
175
176 """
177 useradd preinst is appended here because pkg_preinst may be
178 required to execute on the target. Not doing so may cause
179 useradd preinst to be invoked twice, causing unwanted warnings.
180 """
181 preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
182 if not preinst:
183 preinst = '#!/bin/sh\n'
184 preinst += 'bbnote () {\n%s}\n' % d.getVar('bbnote', True)
185 preinst += 'bbwarn () {\n%s}\n' % d.getVar('bbwarn', True)
186 preinst += 'bbfatal () {\n%s}\n' % d.getVar('bbfatal', True)
187 preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd', True)
188 preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd', True)
189 preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems', True)
190 preinst += d.getVar('useradd_preinst', True)
191 d.setVar('pkg_preinst_%s' % pkg, preinst)
192
193 # RDEPENDS setup
194 rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
195 rdepends += ' ' + d.getVar('MLPREFIX') + 'base-passwd'
196 rdepends += ' ' + d.getVar('MLPREFIX') + 'shadow'
197 # base-files is where the default /etc/skel is packaged
198 rdepends += ' ' + d.getVar('MLPREFIX') + 'base-files'
199 d.setVar("RDEPENDS_%s" % pkg, rdepends)
200
201 # Add the user/group preinstall scripts and RDEPENDS requirements
202 # to packages specified by USERADD_PACKAGES
203 if not bb.data.inherits_class('nativesdk', d) \
204 and not bb.data.inherits_class('native', d):
205 useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
206 for pkg in useradd_packages.split():
207 update_useradd_package(pkg)
208}
209
210# Use the following to extend useradd with custom functions
211USERADDEXTENSION ?= ""
212
213inherit ${USERADDEXTENSION}
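A sketch of a recipe using this class (user and group names are
illustrative); note the ';' separator for multiple invocations, which
useradd_preinst above splits and processes one at a time:

    inherit useradd
    USERADD_PACKAGES = "${PN}"
    GROUPADD_PARAM_${PN} = "--system mygroup"
    USERADD_PARAM_${PN} = "--system -g mygroup -s /bin/false myuser; --system anotheruser"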
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
new file mode 100644
index 0000000000..c47b1eb810
--- /dev/null
+++ b/meta/classes/useradd_base.bbclass
@@ -0,0 +1,230 @@
1# This bbclass provides basic functionality for user/group settings.
2# This bbclass is intended to be inherited by useradd.bbclass and
3# extrausers.bbclass.
4
5# The following functions basically have similar logic.
6# *) Perform necessary checks before invoking the actual command
7# *) Invoke the actual command, make retries if necessary
8# *) Error out if an error occurs.
9
10# Note that before invoking these functions, make sure the global variable
11# PSEUDO is set up correctly.
12
13perform_groupadd () {
14 local rootdir="$1"
15 local opts="$2"
16 local retries="$3"
17 bbnote "Performing groupadd with [$opts] and $retries times of retry"
18 local groupname=`echo "$opts" | awk '{ print $NF }'`
19 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
20 if test "x$group_exists" = "x"; then
21 local count=0
22 while true; do
23 eval $PSEUDO groupadd $opts || true
24 group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
25 if test "x$group_exists" = "x"; then
26 bbwarn "groupadd command did not succeed. Retrying..."
27 else
28 break
29 fi
30 count=`expr $count + 1`
31 if test $count = $retries; then
32 bbfatal "Tried running groupadd command $retries times without scucess, giving up"
33 fi
34 sleep $count
35 done
36 else
37 bbwarn "group $groupname already exists, not re-creating it"
38 fi
39}
40
41perform_useradd () {
42 local rootdir="$1"
43 local opts="$2"
44 local retries="$3"
45 bbnote "Performing useradd with [$opts] and $retries times of retry"
46 local username=`echo "$opts" | awk '{ print $NF }'`
47 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
48 if test "x$user_exists" = "x"; then
49 local count=0
50 while true; do
51 eval $PSEUDO useradd $opts || true
52 user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
53 if test "x$user_exists" = "x"; then
54 bbwarn "useradd command did not succeed. Retrying..."
55 else
56 break
57 fi
58 count=`expr $count + 1`
59 if test $count = $retries; then
60 bbfatal "Tried running useradd command $retries times without scucess, giving up"
61 fi
62 sleep $count
63 done
64 else
65 bbwarn "user $username already exists, not re-creating it"
66 fi
67}
68
69perform_groupmems () {
70 local rootdir="$1"
71 local opts="$2"
72 local retries="$3"
73 bbnote "Performing groupmems with [$opts] and $retries times of retry"
74 local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
75 local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
76 bbnote "Running groupmems command with group $groupname and user $username"
77 # groupmems fails if /etc/gshadow does not exist
78 local gshadow=""
79 if [ -f $rootdir${sysconfdir}/gshadow ]; then
80 gshadow="yes"
81 else
82 gshadow="no"
83 touch $rootdir${sysconfdir}/gshadow
84 fi
85 local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
86 if test "x$mem_exists" = "x"; then
87 local count=0
88 while true; do
89 eval $PSEUDO groupmems $opts || true
90 mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
91 if test "x$mem_exists" = "x"; then
92 bbwarn "groupmems command did not succeed. Retrying..."
93 else
94 break
95 fi
96 count=`expr $count + 1`
97 if test $count = $retries; then
98 if test "x$gshadow" = "xno"; then
99 rm -f $rootdir${sysconfdir}/gshadow
100 rm -f $rootdir${sysconfdir}/gshadow-
101 fi
102 bbfatal "Tried running groupmems command $retries times without scucess, giving up"
103 fi
104 sleep $count
105 done
106 else
107 bbwarn "group $groupname already contains $username, not re-adding it"
108 fi
109 if test "x$gshadow" = "xno"; then
110 rm -f $rootdir${sysconfdir}/gshadow
111 rm -f $rootdir${sysconfdir}/gshadow-
112 fi
113}
114
115perform_groupdel () {
116 local rootdir="$1"
117 local opts="$2"
118 local retries="$3"
119 bbnote "Performing groupdel with [$opts] and $retries times of retry"
120 local groupname=`echo "$opts" | awk '{ print $NF }'`
121 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
122 if test "x$group_exists" != "x"; then
123 local count=0
124 while true; do
125 eval $PSEUDO groupdel $opts || true
126 group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
127 if test "x$group_exists" != "x"; then
128 bbwarn "groupdel command did not succeed. Retrying..."
129 else
130 break
131 fi
132 count=`expr $count + 1`
133 if test $count = $retries; then
134 bbfatal "Tried running groupdel command $retries times without scucess, giving up"
135 fi
136 sleep $count
137 done
138 else
139 bbwarn "group $groupname doesn't exist, not removing it"
140 fi
141}
142
143perform_userdel () {
144 local rootdir="$1"
145 local opts="$2"
146 local retries="$3"
147 bbnote "Performing userdel with [$opts] and $retries times of retry"
148 local username=`echo "$opts" | awk '{ print $NF }'`
149 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
150 if test "x$user_exists" != "x"; then
151 local count=0
152 while true; do
153 eval $PSEUDO userdel $opts || true
154 user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
155 if test "x$user_exists" != "x"; then
156 bbwarn "userdel command did not succeed. Retrying..."
157 else
158 break
159 fi
160 count=`expr $count + 1`
161 if test $count = $retries; then
162 bbfatal "Tried running userdel command $retries times without scucess, giving up"
163 fi
164 sleep $count
165 done
166 else
167 bbwarn "user $username doesn't exist, not removing it"
168 fi
169}
170
171perform_groupmod () {
172 # Other than the return value of groupmod, there's no simple way to judge whether the command
173    # succeeds, so we disable the -e option temporarily
174 set +e
175 local rootdir="$1"
176 local opts="$2"
177 local retries="$3"
178 bbnote "Performing groupmod with [$opts] and $retries times of retry"
179 local groupname=`echo "$opts" | awk '{ print $NF }'`
180 local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
181 if test "x$group_exists" != "x"; then
182 local count=0
183 while true; do
184 eval $PSEUDO groupmod $opts
185 if test $? != 0; then
186 bbwarn "groupmod command did not succeed. Retrying..."
187 else
188 break
189 fi
190 count=`expr $count + 1`
191 if test $count = $retries; then
192 bbfatal "Tried running groupmod command $retries times without scucess, giving up"
193 fi
194 sleep $count
195 done
196 else
197 bbwarn "group $groupname doesn't exist, unable to modify it"
198 fi
199 set -e
200}
201
202perform_usermod () {
203    # Same reason as for groupmod: temporarily disable the -e option
204 set +e
205 local rootdir="$1"
206 local opts="$2"
207 local retries="$3"
208 bbnote "Performing usermod with [$opts] and $retries times of retry"
209 local username=`echo "$opts" | awk '{ print $NF }'`
210 local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
211 if test "x$user_exists" != "x"; then
212 local count=0
213 while true; do
214 eval $PSEUDO usermod $opts
215 if test $? != 0; then
216 bbwarn "usermod command did not succeed. Retrying..."
217 else
218 break
219 fi
220 count=`expr $count + 1`
221 if test $count = $retries; then
222 bbfatal "Tried running usermod command $retries times without scucess, giving up"
223 fi
224 sleep $count
225 done
226 else
227 bbwarn "user $username doesn't exist, unable to modify it"
228 fi
229 set -e
230}
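The inheriting classes call these helpers with a root directory, the full
option string, and a retry count; illustrative calls in the style of
useradd_preinst (group and user names are hypothetical):

    perform_groupadd "$D" "--root $D --system mygroup" 10
    perform_useradd "$D" "--root $D --system -g mygroup myuser" 10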
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
new file mode 100644
index 0000000000..1792f18e8c
--- /dev/null
+++ b/meta/classes/utility-tasks.bbclass
@@ -0,0 +1,69 @@
1addtask listtasks
2do_listtasks[nostamp] = "1"
3python do_listtasks() {
4 taskdescs = {}
5 maxlen = 0
6 for e in d.keys():
7 if d.getVarFlag(e, 'task'):
8 maxlen = max(maxlen, len(e))
9 if e.endswith('_setscene'):
10 desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
11 else:
12 desc = d.getVarFlag(e, 'doc') or ''
13 taskdescs[e] = desc
14
15 tasks = sorted(taskdescs.keys())
16 for taskname in tasks:
17 bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
18}
19
20CLEANFUNCS ?= ""
21
22T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
23addtask clean
24do_clean[nostamp] = "1"
25python do_clean() {
26 """clear the build and temp directories"""
27 dir = d.expand("${WORKDIR}")
28 bb.note("Removing " + dir)
29 oe.path.remove(dir)
30
31 dir = "%s.*" % bb.data.expand(d.getVar('STAMP'), d)
32 bb.note("Removing " + dir)
33 oe.path.remove(dir)
34
35 for f in (d.getVar('CLEANFUNCS', True) or '').split():
36 bb.build.exec_func(f, d)
37}
38
39addtask checkuri
40do_checkuri[nostamp] = "1"
41python do_checkuri() {
42 src_uri = (d.getVar('SRC_URI', True) or "").split()
43 if len(src_uri) == 0:
44 return
45
46 localdata = bb.data.createCopy(d)
47 bb.data.update_data(localdata)
48
49 try:
50 fetcher = bb.fetch2.Fetch(src_uri, localdata)
51 fetcher.checkstatus()
52 except bb.fetch2.BBFetchException, e:
53 raise bb.build.FuncFailed(e)
54}
55
56addtask checkuriall after do_checkuri
57do_checkuriall[recrdeptask] = "do_checkuriall do_checkuri"
58do_checkuriall[recideptask] = "do_${BB_DEFAULT_TASK}"
59do_checkuriall[nostamp] = "1"
60do_checkuriall() {
61 :
62}
63
64addtask fetchall after do_fetch
65do_fetchall[recrdeptask] = "do_fetchall do_fetch"
66do_fetchall[recideptask] = "do_${BB_DEFAULT_TASK}"
67do_fetchall() {
68 :
69}
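These tasks are invoked directly from the command line, for example:

    $ bitbake busybox -c listtasks
    $ bitbake busybox -c clean
    $ bitbake core-image-minimal -c checkuriall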
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
new file mode 100644
index 0000000000..80e90e8777
--- /dev/null
+++ b/meta/classes/utils.bbclass
@@ -0,0 +1,379 @@
1# For compatibility
2def base_path_join(a, *p):
3 return oe.path.join(a, *p)
4
5def base_path_relative(src, dest):
6 return oe.path.relative(src, dest)
7
8def base_path_out(path, d):
9 return oe.path.format_display(path, d)
10
11def base_read_file(filename):
12 return oe.utils.read_file(filename)
13
14def base_ifelse(condition, iftrue = True, iffalse = False):
15 return oe.utils.ifelse(condition, iftrue, iffalse)
16
17def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
18 return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)
19
20def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
21 return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
22
23def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
24 return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
25
26def base_contains(variable, checkvalues, truevalue, falsevalue, d):
27 return bb.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
28
29def base_both_contain(variable1, variable2, checkvalue, d):
30 return oe.utils.both_contain(variable1, variable2, checkvalue, d)
31
32def base_prune_suffix(var, suffixes, d):
33 return oe.utils.prune_suffix(var, suffixes, d)
34
35def oe_filter(f, str, d):
36 return oe.utils.str_filter(f, str, d)
37
38def oe_filter_out(f, str, d):
39 return oe.utils.str_filter_out(f, str, d)
40
41def machine_paths(d):
42 """List any existing machine specific filespath directories"""
43 machine = d.getVar("MACHINE", True)
44 filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
45 for basepath in d.getVar("FILESPATHBASE", True).split(":"):
46 for pkgpath in filespathpkg:
47 machinepath = os.path.join(basepath, pkgpath, machine)
48 if os.path.isdir(machinepath):
49 yield machinepath
50
51def is_machine_specific(d):
52 """Determine whether the current recipe is machine specific"""
53 machinepaths = set(machine_paths(d))
54 srcuri = d.getVar("SRC_URI", True).split()
55 for url in srcuri:
56        fetcher = bb.fetch2.Fetch(srcuri, d)
57 if url.startswith("file://"):
58 if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
59 return True
60
61oe_soinstall() {
62 # Purpose: Install shared library file and
63 # create the necessary links
64 # Example:
65 #
66	# oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
67 #
68 #bbnote installing shared library $1 to $2
69 #
70 libname=`basename $1`
71 install -m 755 $1 $2/$libname
72 sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
73 solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
74 ln -sf $libname $2/$sonamelink
75 ln -sf $libname $2/$solink
76}
77
78oe_libinstall() {
79 # Purpose: Install a library, in all its forms
80 # Example
81 #
82 # oe_libinstall libltdl ${STAGING_LIBDIR}/
83 # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
84 dir=""
85 libtool=""
86 silent=""
87 require_static=""
88 require_shared=""
89 staging_install=""
90 while [ "$#" -gt 0 ]; do
91 case "$1" in
92 -C)
93 shift
94 dir="$1"
95 ;;
96 -s)
97 silent=1
98 ;;
99 -a)
100 require_static=1
101 ;;
102 -so)
103 require_shared=1
104 ;;
105 -*)
106 bbfatal "oe_libinstall: unknown option: $1"
107 ;;
108 *)
109 break;
110 ;;
111 esac
112 shift
113 done
114
115 libname="$1"
116 shift
117 destpath="$1"
118 if [ -z "$destpath" ]; then
119 bbfatal "oe_libinstall: no destination path specified"
120 fi
121 if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
122 then
123 staging_install=1
124 fi
125
126 __runcmd () {
127 if [ -z "$silent" ]; then
128 echo >&2 "oe_libinstall: $*"
129 fi
130 $*
131 }
132
133 if [ -z "$dir" ]; then
134 dir=`pwd`
135 fi
136
137 dotlai=$libname.lai
138
139 # Sanity check that the libname.lai is unique
140 number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
141 if [ $number_of_files -gt 1 ]; then
142 bbfatal "oe_libinstall: $dotlai is not unique in $dir"
143 fi
144
145
146 dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
147 olddir=`pwd`
148 __runcmd cd $dir
149
150 lafile=$libname.la
151
152	# If no such file exists, try cutting off the version suffix
153 if [ ! -f "$lafile" ]; then
154 libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
155		lafile1=$libname1.la
156 if [ -f "$lafile1" ]; then
157 libname=$libname1
158 lafile=$lafile1
159 fi
160 fi
161
162 if [ -f "$lafile" ]; then
163 # libtool archive
164 eval `cat $lafile|grep "^library_names="`
165 libtool=1
166 else
167 library_names="$libname.so* $libname.dll.a $libname.*.dylib"
168 fi
169
170 __runcmd install -d $destpath/
171 dota=$libname.a
172 if [ -f "$dota" -o -n "$require_static" ]; then
173 rm -f $destpath/$dota
174 __runcmd install -m 0644 $dota $destpath/
175 fi
176 if [ -f "$dotlai" -a -n "$libtool" ]; then
177 rm -f $destpath/$libname.la
178 __runcmd install -m 0644 $dotlai $destpath/$libname.la
179 fi
180
181 for name in $library_names; do
182 files=`eval echo $name`
183 for f in $files; do
184 if [ ! -e "$f" ]; then
185 if [ -n "$libtool" ]; then
186 bbfatal "oe_libinstall: $dir/$f not found."
187 fi
188 elif [ -L "$f" ]; then
189 __runcmd cp -P "$f" $destpath/
190 elif [ ! -L "$f" ]; then
191 libfile="$f"
192 rm -f $destpath/$libfile
193 __runcmd install -m 0755 $libfile $destpath/
194 fi
195 done
196 done
197
198 if [ -z "$libfile" ]; then
199 if [ -n "$require_shared" ]; then
200 bbfatal "oe_libinstall: unable to locate shared library"
201 fi
202 elif [ -z "$libtool" ]; then
203 # special case hack for non-libtool .so.#.#.# links
204 baselibfile=`basename "$libfile"`
205 if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
206 sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
207 solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
208 if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
209 __runcmd ln -sf $baselibfile $destpath/$sonamelink
210 fi
211 __runcmd ln -sf $baselibfile $destpath/$solink
212 fi
213 fi
214
215 __runcmd cd "$olddir"
216}
217
218oe_machinstall() {
219 # Purpose: Install machine dependent files, if available
220 # If not available, check if there is a default
221 # If no default, just touch the destination
222 # Example:
223 # $1 $2 $3 $4
224 # oe_machinstall -m 0644 fstab ${D}/etc/fstab
225 #
226 # TODO: Check argument number?
227 #
228 filename=`basename $3`
229 dirname=`dirname $3`
230
231 for o in `echo ${OVERRIDES} | tr ':' ' '`; do
232 if [ -e $dirname/$o/$filename ]; then
233 bbnote $dirname/$o/$filename present, installing to $4
234 install $1 $2 $dirname/$o/$filename $4
235 return
236 fi
237 done
238# bbnote overrides specific file NOT present, trying default=$3...
239 if [ -e $3 ]; then
240 bbnote $3 present, installing to $4
241 install $1 $2 $3 $4
242 else
243 bbnote $3 NOT present, touching empty $4
244 touch $4
245 fi
246}
247
248create_cmdline_wrapper () {
249 # Create a wrapper script where commandline options are needed
250 #
251 # These are useful to work around relocation issues, by passing extra options
252 # to a program
253 #
254 # Usage: create_cmdline_wrapper FILENAME <extra-options>
255
256 cmd=$1
257 shift
258
259 echo "Generating wrapper script for $cmd"
260
261 mv $cmd $cmd.real
262 cmdname=`basename $cmd`
263 cat <<END >$cmd
264#!/bin/bash
265realpath=\`readlink -fn \$0\`
266exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $@ "\$@"
267END
268 chmod +x $cmd
269}
270
271create_wrapper () {
272 # Create a wrapper script where extra environment variables are needed
273 #
274 # These are useful to work around relocation issues, by setting environment
275 # variables which point to paths in the filesystem.
276 #
277 # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
278
279 cmd=$1
280 shift
281
282 echo "Generating wrapper script for $cmd"
283
284 mv $cmd $cmd.real
285 cmdname=`basename $cmd`
286 cat <<END >$cmd
287#!/bin/bash
288realpath=\`readlink -fn \$0\`
289export $@
290exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
291END
292 chmod +x $cmd
293}
294
295# Copy files/directories from $1 to $2 but using hardlinks
296# (preserve symlinks)
297hardlinkdir () {
298 from=$1
299 to=$2
300 (cd $from; find . -print0 | cpio --null -pdlu $to)
301}
302
303
304def check_app_exists(app, d):
305 app = d.expand(app)
306    path = d.getVar('PATH', True)
307 return bool(bb.utils.which(path, app))
308
309def explode_deps(s):
310 return bb.utils.explode_deps(s)
311
312def base_set_filespath(path, d):
313 filespath = []
314 extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
315 # Remove default flag which was used for checking
316 extrapaths = extrapaths.replace("__default:", "")
317 # Don't prepend empty strings to the path list
318 if extrapaths != "":
319 path = extrapaths.split(":") + path
320 # The ":" ensures we have an 'empty' override
321 overrides = (":" + (d.getVar("FILESOVERRIDES", True) or "")).split(":")
322 overrides.reverse()
323 for o in overrides:
324 for p in path:
325 if p != "":
326 filespath.append(os.path.join(p, o))
327 return ":".join(filespath)
328
329def extend_variants(d, var, extend, delim=':'):
330 """Return a string of all bb class extend variants for the given extend"""
331 variants = []
332 whole = d.getVar(var, True) or ""
333 for ext in whole.split():
334 eext = ext.split(delim)
335 if len(eext) > 1 and eext[0] == extend:
336 variants.append(eext[1])
337 return " ".join(variants)
338
339def multilib_pkg_extend(d, pkg):
340 variants = (d.getVar("MULTILIB_VARIANTS", True) or "").split()
341 if not variants:
342 return pkg
343 pkgs = pkg
344 for v in variants:
345 pkgs = pkgs + " " + v + "-" + pkg
346 return pkgs
347
348def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
349 """Return a string of all ${var} in all multilib tune configuration"""
350 values = []
351 value = d.getVar(var, True) or ""
352 if value != "":
353 if need_split:
354 for item in value.split(delim):
355 values.append(item)
356 else:
357 values.append(value)
358 variants = d.getVar("MULTILIB_VARIANTS", True) or ""
359 for item in variants.split():
360 localdata = bb.data.createCopy(d)
361 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
362 localdata.setVar("OVERRIDES", overrides)
363 bb.data.update_data(localdata)
364 value = localdata.getVar(var, True) or ""
365 if value != "":
366 if need_split:
367 for item in value.split(delim):
368 values.append(item)
369 else:
370 values.append(value)
371 if unique:
372        # we do this to keep the original order as much as possible
373 ret = []
374 for value in values:
375            if value not in ret:
376 ret.append(value)
377 else:
378 ret = values
379 return " ".join(ret)
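Most of these helpers are meant to be called from a recipe's shell tasks. A
minimal sketch of the two most common ones (the recipe context, the library
name libfoo and the FOO_HOME variable are illustrative, not part of this
class):

    do_install_append() {
        # install a libtool-built library together with its .la/.a/.so links
        oe_libinstall -so -C src libfoo ${D}${libdir}

        # wrap a relocated binary so it can find its data files at runtime
        create_wrapper ${D}${bindir}/foo FOO_HOME=${datadir}/foo
    }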
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
new file mode 100644
index 0000000000..0b7803b251
--- /dev/null
+++ b/meta/classes/vala.bbclass
@@ -0,0 +1,21 @@
1# Vala has problems with multiple concurrent invocations
2PARALLEL_MAKE = ""
3
4# Everyone needs vala-native and targets need vala, too,
5# because that is where target builds look for .vapi files.
6#
7VALADEPENDS = ""
8VALADEPENDS_class-target = "vala"
9DEPENDS_append = " vala-native ${VALADEPENDS}"
10
11# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
12export STAGING_DATADIR
13# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
14export XDG_DATA_DIRS = "${STAGING_DATADIR}"
15
16# Package additional files
17FILES_${PN}-dev += "\
18 ${datadir}/vala/vapi/*.vapi \
19 ${datadir}/vala/vapi/*.deps \
20 ${datadir}/gir-1.0 \
21"
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
new file mode 100644
index 0000000000..3a221e7082
--- /dev/null
+++ b/meta/classes/waf.bbclass
@@ -0,0 +1,13 @@
1waf_do_configure() {
2 ${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
3}
4
5waf_do_compile() {
6 ${S}/waf build ${PARALLEL_MAKE}
7}
8
9waf_do_install() {
10 ${S}/waf install --destdir=${D}
11}
12
13EXPORT_FUNCTIONS do_configure do_compile do_install
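A recipe built with waf opts in the same way, optionally passing extra
configure switches through EXTRA_OECONF, which waf_do_configure forwards to
./waf configure. A minimal sketch (the --disable-docs switch is illustrative,
not a flag every waf project accepts):

    inherit waf
    EXTRA_OECONF = "--disable-docs"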