Diffstat (limited to 'meta/classes-global')
-rw-r--r--  meta/classes-global/base.bbclass              |  737
-rw-r--r--  meta/classes-global/buildstats.bbclass        |  305
-rw-r--r--  meta/classes-global/debian.bbclass            |  141
-rw-r--r--  meta/classes-global/devshell.bbclass          |  164
-rw-r--r--  meta/classes-global/insane.bbclass            | 1632
-rw-r--r--  meta/classes-global/license.bbclass           |  266
-rw-r--r--  meta/classes-global/logging.bbclass           |  117
-rw-r--r--  meta/classes-global/mirrors.bbclass           |   88
-rw-r--r--  meta/classes-global/package.bbclass           |  611
-rw-r--r--  meta/classes-global/package_deb.bbclass       |  333
-rw-r--r--  meta/classes-global/package_ipk.bbclass       |  300
-rw-r--r--  meta/classes-global/package_pkgdata.bbclass   |  173
-rw-r--r--  meta/classes-global/package_rpm.bbclass       |  776
-rw-r--r--  meta/classes-global/packagedata.bbclass       |   40
-rw-r--r--  meta/classes-global/patch.bbclass             |  169
-rw-r--r--  meta/classes-global/retain.bbclass            |  182
-rw-r--r--  meta/classes-global/sanity.bbclass            | 1114
-rw-r--r--  meta/classes-global/sstate.bbclass            | 1372
-rw-r--r--  meta/classes-global/staging.bbclass           |  702
-rw-r--r--  meta/classes-global/uninative.bbclass         |  183
-rw-r--r--  meta/classes-global/utility-tasks.bbclass     |   59
-rw-r--r--  meta/classes-global/utils.bbclass             |  379
-rw-r--r--  meta/classes-global/yocto-check-layer.bbclass |   62
23 files changed, 0 insertions, 9905 deletions
diff --git a/meta/classes-global/base.bbclass b/meta/classes-global/base.bbclass
deleted file mode 100644
index 6de17d1bb5..0000000000
--- a/meta/classes-global/base.bbclass
+++ /dev/null
@@ -1,737 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

BB_DEFAULT_TASK ?= "build"
CLASSOVERRIDE ?= "class-target"

inherit patch
inherit staging

inherit mirrors
inherit utils
inherit utility-tasks
inherit logging

PACKAGECONFIG_CONFARGS ??= ""

inherit metadata_scm

PREFERRED_TOOLCHAIN_TARGET ??= "gcc"
PREFERRED_TOOLCHAIN_NATIVE ??= "gcc"
PREFERRED_TOOLCHAIN_SDK ??= "gcc"

PREFERRED_TOOLCHAIN = "${PREFERRED_TOOLCHAIN_TARGET}"
PREFERRED_TOOLCHAIN:class-native = "${PREFERRED_TOOLCHAIN_NATIVE}"
PREFERRED_TOOLCHAIN:class-cross = "${PREFERRED_TOOLCHAIN_NATIVE}"
PREFERRED_TOOLCHAIN:class-crosssdk = "${PREFERRED_TOOLCHAIN_SDK}"
PREFERRED_TOOLCHAIN:class-nativesdk = "${PREFERRED_TOOLCHAIN_SDK}"

TOOLCHAIN ??= "${PREFERRED_TOOLCHAIN}"
TOOLCHAIN_NATIVE ??= "${PREFERRED_TOOLCHAIN_NATIVE}"

inherit_defer toolchain/${TOOLCHAIN_NATIVE}-native
inherit_defer toolchain/${TOOLCHAIN}

def lsb_distro_identifier(d):
    adjust = d.getVar('LSB_DISTRO_ADJUST')
    adjust_func = None
    if adjust:
        try:
            adjust_func = globals()[adjust]
        except KeyError:
            pass
    return oe.lsb.distro_identifier(adjust_func)

die() {
	bbfatal_log "$*"
}

oe_runmake_call() {
	bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
	${MAKE} ${EXTRA_OEMAKE} "$@"
}

oe_runmake() {
	oe_runmake_call "$@" || die "oe_runmake failed"
}
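
# Illustrative example (hypothetical recipe, not part of this class): a
# do_compile implementation would normally go through oe_runmake so that
# EXTRA_OEMAKE is honoured and a failure aborts the task:
#
#   EXTRA_OEMAKE = "CC='${CC}'"
#   do_compile() {
#       oe_runmake -C ${S} all
#   }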


def get_base_dep(d):
    if d.getVar('INHIBIT_DEFAULT_DEPS', False):
        return ""
    return "${BASE_DEFAULT_DEPS}"

BASE_DEFAULT_DEPS = "virtual/cross-cc virtual/compilerlibs virtual/libc"

BASEDEPENDS = ""
BASEDEPENDS:class-target = "${@get_base_dep(d)}"
BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"

DEPENDS:prepend = "${BASEDEPENDS} "

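# Illustrative example (hypothetical recipe, not part of this class): a
# recipe that needs neither a compiler nor libc can opt out of the default
# dependencies computed above with:
#
#   INHIBIT_DEFAULT_DEPS = "1"
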
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion, as it has to run
# in the context of the location it's used in (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
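
# Illustrative example (hypothetical bbappend, not part of this class):
# layers usually extend the search path computed above using immediate
# expansion, so that THISDIR still points at the bbappend's own directory:
#
#   FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"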

def extra_path_elements(d):
    path = ""
    elements = (d.getVar('EXTRANATIVEPATH') or "").split()
    for e in elements:
        path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
    return path

PATH:prepend = "${@extra_path_elements(d)}"
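
# Illustrative example (hypothetical recipe, not part of this class): adding
# an entry here puts that subdirectory of the native staging bindir on PATH:
#
#   EXTRANATIVEPATH += "chrpath-native"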

def get_lic_checksum_file_list(d):
    filelist = []
    lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
    tmpdir = d.getVar("TMPDIR")
    s = d.getVar("S")
    b = d.getVar("B")
    workdir = d.getVar("WORKDIR")

    urls = lic_files.split()
    for url in urls:
        # We only care about items that are absolute paths since
        # any others should be covered by SRC_URI.
        try:
            (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
            if method != "file" or not path:
                raise bb.fetch.MalformedUrl(url)

            if path[0] == '/':
                if path.startswith((tmpdir, s, b, workdir)):
                    continue
                filelist.append(path + ":" + str(os.path.exists(path)))
        except bb.fetch.MalformedUrl:
            bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
    return " ".join(filelist)

def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
    tools = d.getVar(toolsvar).split()
    origbbenv = d.getVar("BB_ORIGENV", False)
    path = origbbenv.getVar("PATH")
    # Need to ignore our own scripts directories to avoid circular links
    for p in path.split(":"):
        if p.endswith("/scripts"):
            path = path.replace(p, "/ignoreme")
    bb.utils.mkdirhier(dest)
    notfound = []
    for tool in tools:
        desttool = os.path.join(dest, tool)
        if not os.path.exists(desttool):
            # clean up dead symlink
            if os.path.islink(desttool):
                os.unlink(desttool)
            srctool = bb.utils.which(path, tool, executable=True)
            # gcc/g++ may link to ccache on some hosts, e.g.,
            # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, in which case which(gcc)
            # would return /usr/local/bin/ccache/gcc, but what we need is
            # /usr/bin/gcc; this code checks for and fixes that.
            if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
                srctool = bb.utils.which(path, tool, executable=True, direction=1)
            if srctool:
                os.symlink(srctool, desttool)
            else:
                notfound.append(tool)

    if notfound and fatal:
        bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n  %s" % " ".join(notfound))

# We can't use vardepvalue against do_fetch directly since that would overwrite
# the other task dependencies, so we use an indirect function.
python fetcher_hashes_dummyfunc() {
    return
}
fetcher_hashes_dummyfunc[vardepvalue] = "${@bb.fetch.get_hashvalue(d)}"

addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[prefuncs] += "fetcher_hashes_dummyfunc"
do_fetch[network] = "1"
do_fetch[umask] = "${OE_SHARED_UMASK}"
python base_do_fetch() {

    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.download()
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}

addtask unpack after do_fetch
do_unpack[cleandirs] = "${UNPACKDIR}"

python base_do_unpack() {
    import shutil

    sourcedir = d.getVar('S')
    # Intentionally keep SOURCE_BASEDIR internal to the task just for SDE
    d.setVar("SOURCE_BASEDIR", sourcedir)

    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    basedir = None
    unpackdir = d.getVar('UNPACKDIR')
    if sourcedir.startswith(unpackdir):
        basedir = sourcedir.replace(unpackdir, '').strip("/").split('/')[0]
    if basedir:
        d.setVar("SOURCE_BASEDIR", unpackdir + '/' + basedir)

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.unpack(d.getVar('UNPACKDIR'))
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}

SSTATETASKS += "do_deploy_source_date_epoch"

do_deploy_source_date_epoch () {
	mkdir -p ${SDE_DEPLOYDIR}
	if [ -e ${SDE_FILE} ]; then
		echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
		cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
	else
		echo "${SDE_FILE} not found!"
	fi
}

python do_deploy_source_date_epoch_setscene () {
    sstate_setscene(d)
    bb.utils.mkdirhier(d.getVar('SDE_DIR'))
    sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
    if os.path.exists(sde_file):
        target = d.getVar('SDE_FILE')
        bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
        bb.utils.rename(sde_file, target)
    else:
        bb.debug(1, "%s not found!" % sde_file)
}

do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch

python create_source_date_epoch_stamp() {
    # Version: 2
    source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('SOURCE_BASEDIR') or d.getVar('S'))
    oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
do_unpack[postfuncs] += "create_source_date_epoch_stamp"

def get_source_date_epoch_value(d):
    return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)

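# Illustrative usage (an assumption about callers, not part of this class):
# configuration elsewhere reads the recorded epoch back with something like
#
#   SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
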
def get_layers_branch_rev(d):
    revisions = oe.buildcfg.get_layer_revisions(d)
    layers_branch_rev = ["%-20s = \"%s:%s\"" % (r[1], r[2], r[3]) for r in revisions]
    i = len(layers_branch_rev)-1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2 = layers_branch_rev[i-1][p2:]
        if s1 == s2:
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1 = layers_branch_rev[i][p1:]
    return layers_branch_rev


BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
BUILDCFG_FUNCS[type] = "list"

def buildcfg_vars(d):
    statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
    for var in statusvars:
        value = d.getVar(var)
        if value is not None:
            yield '%-20s = "%s"' % (var, value)

def buildcfg_neededvars(d):
    needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
    pesteruser = []
    for v in needed_vars:
        val = d.getVar(v)
        if not val or val == 'INVALID':
            pesteruser.append(v)

    if pesteruser:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))

addhandler base_eventhandler
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed bb.event.RecipePreDeferredInherits"
python base_eventhandler() {
    if isinstance(e, bb.event.RecipePreDeferredInherits):
        # Use this to snoop on class extensions and set these up before the deferred inherits
        # are processed, which allows overrides on conditional variables.
        for c in ['native', 'nativesdk', 'crosssdk', 'cross']:
            if c in e.inherits:
                d.setVar('CLASSOVERRIDE', 'class-' + c)
                break
        return

    if isinstance(e, bb.event.ConfigParsed):
        if not d.getVar("NATIVELSBSTRING", False):
            d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
        d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
        d.setVar('BB_VERSION', bb.__version__)

    # There might be no bb.event.ConfigParsed event if the bitbake server is
    # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
    # exists.
    if isinstance(e, bb.event.ConfigParsed) or \
            (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
        # Works with the line in layer.conf which changes PATH to point here
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)

    if isinstance(e, bb.event.MultiConfigParsed):
        # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores'
        # own contexts so the variables get expanded correctly for that arch, then inject back into
        # the main data store.
        deps = []
        for config in e.mcdata:
            deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
        deps = " ".join(deps)
        e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)

    if isinstance(e, bb.event.BuildStarted):
        localdata = bb.data.createCopy(d)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = d.getVar('BUILDCFG_HEADER')
        if statusheader:
            bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))

    if isinstance(e, bb.event.RecipeParsed):
        #
        # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set,
        # skip parsing for all the other providers, which will mean they get uninstalled from the
        # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
        # particular.
        #
        pn = d.getVar('PN')
        source_mirror_fetch = bb.utils.to_boolean(d.getVar('SOURCE_MIRROR_FETCH', False))
        if not source_mirror_fetch:
            provs = (d.getVar("PROVIDES") or "").split()
            multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
            for p in provs:
                if p.startswith("virtual/") and p not in multiprovidersallowed:
                    profprov = d.getVar("PREFERRED_PROVIDER_" + p)
                    if profprov and pn != profprov:
                        raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}

CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
CLEANBROKEN = "0"

addtask configure after do_patch
do_configure[dirs] = "${B}"
base_do_configure() {
	if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
		if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
			cd ${B}
			if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
				oe_runmake clean
			fi
			# -ignore_readdir_race does not work correctly with -delete;
			# use xargs to avoid spurious build failures
			find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
		fi
	fi
	if [ -n "${CONFIGURESTAMPFILE}" ]; then
		mkdir -p `dirname ${CONFIGURESTAMPFILE}`
		echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
	fi
}

addtask compile after do_configure
do_compile[dirs] = "${B}"
base_do_compile() {
	if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
		oe_runmake || die "make failed"
	else
		bbnote "nothing to compile"
	fi
}

addtask install after do_compile
do_install[dirs] = "${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"

base_do_install() {
	:
}

addtask build after do_populate_sysroot
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
	:
}

def set_packagetriplet(d):
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS").split())
    tos.append(d.getVar("TARGET_OS"))
    tvs.append(d.getVar("TARGET_VENDOR"))

    def settriplet(d, varname, archs, tos, tvs):
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS") or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)

        archs.append(localdata.getVar("PACKAGE_ARCHS").split())
        tos.append(localdata.getVar("TARGET_OS"))
        tvs.append(localdata.getVar("TARGET_VENDOR"))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)

python () {
    import string, re

    # Handle backfilling
    oe.utils.features_backfill("DISTRO_FEATURES", d)
    oe.utils.features_backfill("MACHINE_FEATURES", d)

    # To add a recipe to the skip list, set:
    #   SKIP_RECIPE[pn] = "message"
    pn = d.getVar('PN')
    skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
    if skip_msg:
        bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
        raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    # PACKAGECONFIG ??= "<default options>"
    # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
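    #
    # Illustrative example (hypothetical recipe, not part of this class) of
    # what the handling below acts on: enabling the 'gnutls' flag would add
    # its configure argument and its build-time dependency:
    #
    #   PACKAGECONFIG ??= "gnutls"
    #   PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls"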
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
        pn = d.getVar("PN")

        mlprefix = d.getVar("MLPREFIX")

        def expandFilter(appends, extension, prefix):
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or ("-cross-" in a):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    if subs.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        def appendVar(varname, appends):
            if not appends:
                return
            if "DEPENDS" in varname or varname.startswith("RRECOMMENDS"):
                if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
                    appends = expandFilter(appends, "", "nativesdk-")
                elif bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
                    appends = expandFilter(appends, "-native", "")
                elif mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extrarrecs = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            items = flagval.split(",")
            num = len(items)
            if num > 6:
                bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
                    % (d.getVar('PN'), flag))

            if flag in pkgconfig:
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 5 and items[4]:
                    extrarrecs.append(items[4])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                extraconf.append(items[1])

            if num >= 6 and items[5]:
                conflicts = set(items[5].split())
                invalid = conflicts.difference(set(pkgconfigflags.keys()))
                if invalid:
                    bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
                        % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))

                if flag in pkgconfig:
                    intersec = conflicts.intersection(set(pkgconfig))
                    if intersec:
                        bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
                            % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))

        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS:${PN}', extrardeps)
        appendVar('RRECOMMENDS:${PN}', extrarrecs)
        appendVar('PACKAGECONFIG_CONFARGS', extraconf)

    pn = d.getVar('PN')
    license = d.getVar('LICENSE')
    if license == "INVALID" and pn != "defaultpkgname":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        oe.license.check_license_format(d)
        unmatched_license_flags = oe.license.check_license_flags(d)
        if unmatched_license_flags:
            for unmatched in unmatched_license_flags:
                message = "Has a restricted license '%s' which is not listed in your LICENSE_FLAGS_ACCEPTED." % unmatched
                details = d.getVarFlag("LICENSE_FLAGS_DETAILS", unmatched)
                if details:
                    message += "\n" + details
                bb.debug(1, "Skipping %s: %s" % (pn, message))
                raise bb.parse.SkipRecipe(message)

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', '1')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', '1')
        d.setVarFlag('do_package_setscene', 'fakeroot', '1')
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', '1')
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')

    need_machine = d.getVar('COMPATIBLE_MACHINE')
    if need_machine and not bb.utils.to_boolean(d.getVar('PARSE_ALL_RECIPES', False)):
        import re
        compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
        for m in compat_machines:
            if re.match(need_machine, m):
                break
        else:
            raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))

    source_mirror_fetch = bb.utils.to_boolean(d.getVar('SOURCE_MIRROR_FETCH', False)) or \
            bb.utils.to_boolean(d.getVar('PARSE_ALL_RECIPES', False))
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST')
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS')
            if not re.match(need_host, this_host):
                raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()

        pkgs = d.getVar('PACKAGES').split()
        if pkgs:
            skipped_pkgs = oe.license.skip_incompatible_package_licenses(d, pkgs)
            unskipped_pkgs = [p for p in pkgs if p not in skipped_pkgs]

            if unskipped_pkgs:
                for pkg in skipped_pkgs:
                    bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
                for pkg in unskipped_pkgs:
                    bb.debug(1, "Including the package %s" % pkg)
            else:
                incompatible_lic = oe.license.incompatible_license(d, bad_licenses)
                for pkg in skipped_pkgs:
                    incompatible_lic += skipped_pkgs[pkg]
                incompatible_lic = sorted(list(set(incompatible_lic)))

                if incompatible_lic:
                    bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
                    raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))

    srcuri = d.getVar('SRC_URI')
    for uri_string in srcuri.split():
        uri = bb.fetch.URI(uri_string)
        # Also check downloadfilename as the URL path might not be useful for sniffing
        path = uri.params.get("downloadfilename", uri.path)

        # HTTP/FTP use the wget fetcher
        if uri.scheme in ("http", "https", "ftp"):
            d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')

        # Svn packages should DEPEND on subversion-native
        if uri.scheme == "svn":
            d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

        # Git packages should DEPEND on git-native
        elif uri.scheme in ("git", "gitsm"):
            d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

        # Mercurial packages should DEPEND on mercurial-native
        elif uri.scheme == "hg":
            d.appendVar("EXTRANATIVEPATH", ' python3-native ')
            d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot ca-certificates-native:do_populate_sysroot')

        # OSC packages should DEPEND on osc-native
        elif uri.scheme == "osc":
            d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

        elif uri.scheme == "npm":
            d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')

        elif uri.scheme == "repo":
            d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')

        # *.lz4 should DEPEND on lz4-native for unpacking
        if path.endswith('.lz4'):
            d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

        # *.zst should DEPEND on zstd-native for unpacking
        elif path.endswith('.zst'):
            d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')

        # *.lz should DEPEND on lzip-native for unpacking
        elif path.endswith('.lz'):
            d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')

        # *.xz should DEPEND on xz-native for unpacking
        elif path.endswith('.xz') or path.endswith('.txz'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # .zip should DEPEND on unzip-native for unpacking
        elif path.endswith('.zip') or path.endswith('.jar'):
            d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

        # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
        elif path.endswith('.rpm'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.deb should DEPEND on xz-native for unpacking
        elif path.endswith('.deb'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.7z should DEPEND on 7zip-native for unpacking
        elif path.endswith('.7z'):
            d.appendVarFlag('do_unpack', 'depends', ' 7zip-native:do_populate_sysroot')

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH')
    pkg_arch = d.getVar('PACKAGE_ARCH')

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for urls with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH') or '').split(':')
        machine = d.getVar('MACHINE')
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if paths:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}

addtask cleansstate after do_clean
python do_cleansstate() {
        sstate_clean_cachefiles(d)
}
addtask cleanall after do_cleansstate
do_cleansstate[nostamp] = "1"

python do_cleanall() {
    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.clean()
    except bb.fetch2.BBFetchException as e:
        bb.fatal(str(e))
}
do_cleanall[nostamp] = "1"


EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install
diff --git a/meta/classes-global/buildstats.bbclass b/meta/classes-global/buildstats.bbclass
deleted file mode 100644
index fe64789e10..0000000000
--- a/meta/classes-global/buildstats.bbclass
+++ /dev/null
@@ -1,305 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

BUILDSTATS_BASE = "${TMPDIR}/buildstats/"

################################################################################
# Build statistics gathering.
#
# The CPU and Time gathering/tracking functions and bbevent inspiration
# were written by Christopher Larson.
#
################################################################################

def get_buildprocess_cputime(pid):
    with open("/proc/%d/stat" % pid, "r") as f:
        fields = f.readline().rstrip().split()
    # 13: utime, 14: stime, 15: cutime, 16: cstime
    return sum(int(field) for field in fields[13:16])

def get_process_cputime(pid):
    import resource
    with open("/proc/%d/stat" % pid, "r") as f:
        fields = f.readline().rstrip().split()
    stats = {
        'utime'  : fields[13],
        'stime'  : fields[14],
        'cutime' : fields[15],
        'cstime' : fields[16],
    }
    iostats = {}
    if os.path.isfile("/proc/%d/io" % pid):
        with open("/proc/%d/io" % pid, "r") as f:
            while True:
                i = f.readline().strip()
                if not i:
                    break
                if ":" not in i:
                    # one more extra line is appended (empty or containing "0"),
                    # most probably due to a race condition in the kernel while
                    # updating the IO stats
                    break
                i = i.split(": ")
                iostats[i[0]] = i[1]
    resources = resource.getrusage(resource.RUSAGE_SELF)
    childres = resource.getrusage(resource.RUSAGE_CHILDREN)
    return stats, iostats, resources, childres

def get_cputime():
    with open("/proc/stat", "r") as f:
        fields = f.readline().rstrip().split()[1:]
    return sum(int(field) for field in fields)

def set_timedata(var, d, server_time):
    d.setVar(var, server_time)

def get_timedata(var, d, end_time):
    oldtime = d.getVar(var, False)
    if oldtime is None:
        return
    return end_time - oldtime

def set_buildtimedata(var, d):
    import time
    # Avoid shadowing the time module with the current timestamp
    now = time.time()
    cputime = get_cputime()
    proctime = get_buildprocess_cputime(os.getpid())
    d.setVar(var, (now, cputime, proctime))

def get_buildtimedata(var, d):
    import time
    timedata = d.getVar(var, False)
    if timedata is None:
        return
    oldtime, oldcpu, oldproc = timedata
    procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
    cpudiff = get_cputime() - oldcpu
    end_time = time.time()
    timediff = end_time - oldtime
    if cpudiff > 0:
        cpuperc = float(procdiff) * 100 / cpudiff
    else:
        cpuperc = None
    return timediff, cpuperc

def write_task_data(status, logfile, e, d):
    with open(os.path.join(logfile), "a") as f:
        elapsedtime = get_timedata("__timedata_task", d, e.time)
        if elapsedtime:
            f.write(d.expand("${PF}: %s\n" % e.task))
            f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
        cpu, iostats, resources, childres = get_process_cputime(os.getpid())
        if cpu:
            f.write("utime: %s\n" % cpu['utime'])
            f.write("stime: %s\n" % cpu['stime'])
            f.write("cutime: %s\n" % cpu['cutime'])
            f.write("cstime: %s\n" % cpu['cstime'])
        for i in iostats:
            f.write("IO %s: %s\n" % (i, iostats[i]))
        rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
        for i in rusages:
            f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
        for i in rusages:
            f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
        if status == "passed":
            f.write("Status: PASSED \n")
        else:
            f.write("Status: FAILED \n")
        f.write("Ended: %0.2f \n" % e.time)

def write_host_data(logfile, e, d, type):
    import subprocess, os, datetime
    # minimum time allowed for each command to run, in seconds
    time_threshold = 0.5
    limit = 10
    # the total number of commands
    num_cmds = 0
    msg = ""
    if type == "interval":
        # interval at which data will be logged
        interval = d.getVar("BB_HEARTBEAT_EVENT", False)
        if interval is None:
            bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
            return
        interval = int(interval)
        cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
        msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
        if cmds is None:
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
            bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
            return
    if type == "failure":
        cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
        msg = "Host Stats: Collecting data on failure.\n"
        msg += "Failed at task: " + e.task + "\n"
        if cmds is None:
            d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
            bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
            return
    c_san = []
    for cmd in cmds.split(";"):
        if len(cmd) == 0:
            continue
        num_cmds += 1
        c_san.append(cmd)
    if num_cmds == 0:
        if type == "interval":
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
        if type == "failure":
            d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
        return

    # return if the interval is not enough to run all commands within the specified BB_HEARTBEAT_EVENT interval
    if type == "interval":
        limit = interval / num_cmds
        if limit <= time_threshold:
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
            bb.warn("buildstats: Collecting host data failed. The BB_HEARTBEAT_EVENT interval is not enough to run the specified commands. Increase the value of BB_HEARTBEAT_EVENT in conf/local.conf.")
            return

    # set the environment variables
    path = d.getVar("PATH")
    opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
    ospath = os.environ['PATH']
    os.environ['PATH'] = path + ":" + opath + ":" + ospath
    with open(logfile, "a") as f:
        f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
        f.write("%s" % msg)
        for c in c_san:
            try:
                output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
            except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
                output = "Error running command: %s\n%s\n" % (c, err)
            f.write("%s\n%s\n" % (c, output))
    # reset the environment
    os.environ['PATH'] = ospath

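# Illustrative example (hypothetical local.conf settings, not part of this
# class) enabling the interval collection handled above:
#
#   BB_HEARTBEAT_EVENT = "60"
#   BB_LOG_HOST_STAT_ON_INTERVAL = "1"
#   BB_LOG_HOST_STAT_CMDS_INTERVAL = "uptime ; free -m"
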
python run_buildstats () {
    import bb.build
    import bb.event
    import time, subprocess, platform

    bn = d.getVar('BUILDNAME')
    ########################################################################
    # bitbake fires HeartbeatEvent even before a build has been
    # triggered, causing BUILDNAME to be None
    ########################################################################
    if bn is None:
        return

    bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
    taskdir = os.path.join(bsdir, d.getVar('PF'))
    if isinstance(e, bb.event.HeartbeatEvent):
        if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
            bb.utils.mkdirhier(bsdir)
            write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")

    elif isinstance(e, bb.event.BuildStarted):
        ########################################################################
        # If the kernel was not configured to provide I/O statistics, issue
        # a one time warning.
        ########################################################################
        if not os.path.isfile("/proc/%d/io" % os.getpid()):
            bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")

        ########################################################################
        # on the first pass make the buildstats hierarchy and then
        # set the buildname
        ########################################################################
        bb.utils.mkdirhier(bsdir)
        set_buildtimedata("__timedata_build", d)
        build_time = os.path.join(bsdir, "build_stats")
        # write start of build into build_time
        with open(build_time, "a") as f:
            host_info = platform.uname()
            f.write("Host Info: ")
            for x in host_info:
                if x:
                    f.write(x + " ")
            f.write("\n")
            f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])

    elif isinstance(e, bb.event.BuildCompleted):
        build_time = os.path.join(bsdir, "build_stats")
        with open(build_time, "a") as f:
            ########################################################################
            # Write build statistics for the build
            ########################################################################
            timedata = get_buildtimedata("__timedata_build", d)
            if timedata:
                time, cpu = timedata
                # write end of build and cpu used into build_time
                f.write("Elapsed time: %0.2f seconds \n" % (time))
                if cpu:
                    f.write("CPU usage: %0.1f%% \n" % cpu)

    elif isinstance(e, bb.build.TaskStarted):
        set_timedata("__timedata_task", d, e.time)
        bb.utils.mkdirhier(taskdir)
        # write into the task event file the name and start time
        with open(os.path.join(taskdir, e.task), "a") as f:
            f.write("Event: %s \n" % bb.event.getName(e))
            f.write("Started: %0.2f \n" % e.time)

    elif isinstance(e, bb.build.TaskSucceeded):
        write_task_data("passed", os.path.join(taskdir, e.task), e, d)
        if e.task == "do_rootfs":
            bs = os.path.join(bsdir, "build_stats")
            with open(bs, "a") as f:
                rootfs = d.getVar('IMAGE_ROOTFS')
                if os.path.isdir(rootfs):
                    try:
                        rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
                                stderr=subprocess.STDOUT).decode('utf-8')
                        f.write("Uncompressed Rootfs size: %s" % rootfs_size)
                    except subprocess.CalledProcessError as err:
                        bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))

    elif isinstance(e, bb.build.TaskFailed):
        # Can have a failure before TaskStarted so need to mkdir here too
        bb.utils.mkdirhier(taskdir)
        write_task_data("failed", os.path.join(taskdir, e.task), e, d)
        ########################################################################
        # Let's make things easier and tell people where the build failed in
        # build_status. We do this here because BuildCompleted triggers no
        # matter what the status of the build actually is
        ########################################################################
        build_status = os.path.join(bsdir, "build_stats")
        with open(build_status, "a") as f:
            f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
        if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
            write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
}

addhandler run_buildstats
run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"

python runqueue_stats () {
    import oe.buildstats
    from bb import event, runqueue
    # We should not record any samples before the first task has started,
    # because that's the first activity shown in the process chart.
    # Besides, at that point we are sure that the build variables
    # that we need to find the output directory are available.
    # The persistent SystemStats is stored in the datastore and
    # closed when the build is done.
    system_stats = d.getVar('_buildstats_system_stats', False)
    if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
        system_stats = oe.buildstats.SystemStats(d)
        d.setVar('_buildstats_system_stats', system_stats)
    if system_stats:
        # Ensure that we sample at important events.
        done = isinstance(e, bb.event.BuildCompleted)
        if system_stats.sample(e, force=done):
            d.setVar('_buildstats_system_stats', system_stats)
        if done:
            system_stats.close()
            d.delVar('_buildstats_system_stats')
}

addhandler runqueue_stats
runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
diff --git a/meta/classes-global/debian.bbclass b/meta/classes-global/debian.bbclass
deleted file mode 100644
index e2a129d028..0000000000
--- a/meta/classes-global/debian.bbclass
+++ /dev/null
@@ -1,141 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

# Debian package renaming only occurs when a package is built.
# We therefore have to make sure we build all runtime packages
# before building the current package, so that the package's runtime
# dependencies are correct.
#
# Custom library package names can be defined by setting
# DEBIANNAME:<pkgname> to the desired name.
#
# Better expressed as: ensure all RDEPENDS packages are packaged before we
# package this one. This means we can't have circular RDEPENDS/RRECOMMENDS.

AUTO_LIBNAME_PKGS = "${PACKAGES}"

inherit package

python debian_package_name_hook () {
    import glob, copy, stat, errno, re, pathlib, subprocess

    pkgdest = d.getVar("PKGDEST")
    packages = d.getVar('PACKAGES')
    so_re = re.compile(r"lib.*\.so")

    def socrunch(s):
        s = s.lower().replace('_', '-')
        m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
        if m is None:
            return None
        if m.group(2) in '0123456789':
            bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
        else:
            bin = m.group(1) + m.group(2) + m.group(3)
        dev = m.group(1) + m.group(2)
        return (bin, dev)

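    # Worked illustration (computed by hand, not executed here): for a soname
    # such as "libfoo1.so.2", socrunch() above returns ("libfoo1-2", "libfoo1"),
    # the Debian-style binary package name and the base name for -dev packages.
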
    def isexec(path):
        try:
            s = os.stat(path)
        except (os.error, AttributeError):
            return 0
        return (s[stat.ST_MODE] & stat.S_IEXEC)

    def add_rprovides(pkg, d):
        newpkg = d.getVar('PKG:' + pkg)
        if newpkg and newpkg != pkg:
            provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
            if pkg not in provs:
                d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")

    def auto_libname(packages, orig_pkg):
        p = lambda var: pathlib.PurePath(d.getVar(var))
        libdirs = (p("base_libdir"), p("libdir"))
        bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))

        sonames = []
        has_bins = 0
        has_libs = 0
        for f in pkgfiles[orig_pkg]:
            # This is .../packages-split/orig_pkg/
            pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
            # Strip pkgpath off the full path to a file in the package, re-root
            # so it is absolute, and then get the parent directory of the file.
            path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
            if path in bindirs:
                has_bins = 1
            if path in libdirs:
                has_libs = 1
                if so_re.match(os.path.basename(f)):
                    try:
                        cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
                        output = subprocess.check_output(cmd).decode("utf-8")
                        for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
                            if m.group(1) not in sonames:
                                sonames.append(m.group(1))
                    except subprocess.CalledProcessError:
                        pass
        bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
        soname = None
        if len(sonames) == 1:
            soname = sonames[0]
        elif len(sonames) > 1:
            lead = d.getVar('LEAD_SONAME')
            if lead:
                r = re.compile(lead)
                filtered = []
                for s in sonames:
                    if r.match(s):
                        filtered.append(s)
                if len(filtered) == 1:
                    soname = filtered[0]
                elif len(filtered) > 1:
                    bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
                else:
                    bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
            else:
                bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))

        if has_libs and not has_bins and soname:
            soname_result = socrunch(soname)
            if soname_result:
                (pkgname, devname) = soname_result
                for pkg in packages.split():
                    if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
                        add_rprovides(pkg, d)
                        continue
                    debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
                    if debian_pn:
                        newpkg = debian_pn
                    elif pkg == orig_pkg:
                        newpkg = pkgname
                    else:
                        newpkg = pkg.replace(orig_pkg, devname, 1)
                    mlpre = d.getVar('MLPREFIX')
                    if mlpre:
                        if not newpkg.startswith(mlpre):
                            newpkg = mlpre + newpkg
                    if newpkg != pkg:
                        bb.note("debian: renaming %s to %s" % (pkg, newpkg))
                        d.setVar('PKG:' + pkg, newpkg)
                        add_rprovides(pkg, d)
        else:
            add_rprovides(orig_pkg, d)

    # A reversed sort is needed when one package name is a substring of another.
    # For instance, in ncurses we would otherwise get:
    #   DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
    # and later
    #   DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
    # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
    for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
        auto_libname(packages, pkg)
}

EXPORT_FUNCTIONS package_name_hook

DEBIAN_NAMES = "1"
diff --git a/meta/classes-global/devshell.bbclass b/meta/classes-global/devshell.bbclass
deleted file mode 100644
index 4c23049cf0..0000000000
--- a/meta/classes-global/devshell.bbclass
+++ /dev/null
@@ -1,164 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

inherit terminal

DEVSHELL = "${SHELL}"

python do_devshell () {
    if d.getVarFlag("do_devshell", "manualfakeroot"):
        d.prependVar("DEVSHELL", "pseudo ")
        fakeenv = d.getVar("FAKEROOTENV").split()
        for f in fakeenv:
            k = f.split("=")
            d.setVar(k[0], k[1])
            d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
        d.delVarFlag("do_devshell", "fakeroot")

    oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
}

addtask devshell after do_patch do_prepare_recipe_sysroot

# The directory that the terminal starts in
DEVSHELL_STARTDIR ?= "${S}"
do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
do_devshell[nostamp] = "1"
do_devshell[network] = "1"

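# Illustrative usage (user commands, not part of this class): the task above
# is typically invoked as
#
#   bitbake -c devshell <recipe>
#
# and the terminal program used can be chosen via OE_TERMINAL in local.conf.
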
# devshell and fakeroot/pseudo need careful handling since only the final
# command should run under fakeroot emulation; any X connection should
# be done as the normal user. We therefore carefully construct the
# environment manually
python () {
    if d.getVarFlag("do_devshell", "fakeroot"):
        # We need to signal our code that we want fakeroot, however we
        # can't manipulate the environment and variables here yet (see YOCTO #4795)
        d.setVarFlag("do_devshell", "manualfakeroot", "1")
        d.delVarFlag("do_devshell", "fakeroot")
}

def pydevshell(d):

    import code
    import select
    import signal
    import termios

    m, s = os.openpty()
    sname = os.ttyname(s)

    def noechoicanon(fd):
        old = termios.tcgetattr(fd)
        old[3] = old[3] & ~termios.ECHO & ~termios.ICANON
        # & ~termios.ISIG
        termios.tcsetattr(fd, termios.TCSADRAIN, old)

    # No echo or buffering over the pty
    noechoicanon(s)

    pid = os.fork()
    if pid:
        os.close(m)
        oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
        os._exit(0)
    else:
        os.close(s)

        os.dup2(m, sys.stdin.fileno())
        os.dup2(m, sys.stdout.fileno())
        os.dup2(m, sys.stderr.fileno())

        bb.utils.nonblockingfd(sys.stdout)
        bb.utils.nonblockingfd(sys.stderr)
        bb.utils.nonblockingfd(sys.stdin)

        _context = {
            "os": os,
            "bb": bb,
            "time": time,
            "d": d,
        }

        ps1 = "pydevshell> "
        ps2 = "... "
        buf = []
        more = False

        i = code.InteractiveInterpreter(locals=_context)
        print("OE PyShell (PN = %s)\n" % d.getVar("PN"))

        def prompt(more):
            if more:
                prompt = ps2
            else:
                prompt = ps1
            sys.stdout.write(prompt)
            sys.stdout.flush()

        # Restore Ctrl+C since bitbake masks this
        def signal_handler(signal, frame):
            raise KeyboardInterrupt
        signal.signal(signal.SIGINT, signal_handler)

        child = None

        prompt(more)
        while True:
            try:
                try:
                    (r, _, _) = select.select([sys.stdin], [], [], 1)
                    if not r:
                        continue
                    line = sys.stdin.readline().strip()
                    if not line:
                        prompt(more)
                        continue
                except EOFError as e:
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                except (OSError, IOError) as e:
                    if e.errno == 11:
                        continue
                    if e.errno == 5:
                        return
                    raise
                else:
                    if not child:
                        child = int(line)
                        continue
                    buf.append(line)
                    source = "\n".join(buf)
                    more = i.runsource(source, "<pyshell>")
                    if not more:
                        buf = []
                        sys.stderr.flush()
                    prompt(more)
            except KeyboardInterrupt:
                i.write("\nKeyboardInterrupt\n")
                buf = []
                more = False
                prompt(more)
            except SystemExit:
                # Easiest way to ensure everything exits
                os.kill(child, signal.SIGTERM)
                break

python do_pydevshell() {
    import signal

    try:
        pydevshell(d)
    except SystemExit:
        # Stop the SIGTERM above causing an error exit code
        return
    finally:
        return
}
addtask pydevshell after do_patch

do_pydevshell[nostamp] = "1"
do_pydevshell[network] = "1"
diff --git a/meta/classes-global/insane.bbclass b/meta/classes-global/insane.bbclass
deleted file mode 100644
index fed8163c3e..0000000000
--- a/meta/classes-global/insane.bbclass
+++ /dev/null
@@ -1,1632 +0,0 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

# BB Class inspired by ebuild.sh
#
# This class will test files after installation for certain
# security issues and other kinds of issues.
#
# Checks we do:
#  -Check the ownership and permissions
#  -Check the RUNTIME path for the $TMPDIR
#  -Check if .la files wrongly point to workdir
#  -Check if .pc files wrongly point to workdir
#  -Check if packages contain .debug directories or .so files
#   where they should be in -dev or -dbg
#  -Check if config.log contains traces of broken autoconf tests
#  -Check for invalid characters (non-utf8) in some package metadata
#  -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
#   into exec_prefix
#  -Check that scripts in base_[bindir|sbindir|libdir] do not reference
#   files under exec_prefix
#  -Check if the package name is upper case

# These tests are required to be enabled and to pass for Yocto Project
# Compatible status for a layer. To change this list, please contact the
# Yocto Project TSC.
CHECKLAYER_REQUIRED_TESTS = "\
    configure-gettext configure-unsafe debug-files dep-cmp expanded-d files-invalid \
    host-user-contaminated incompatible-license infodir installed-vs-shipped invalid-chars \
    invalid-packageconfig la \
    license-checksum license-exception license-exists license-file-missing license-format license-no-generic license-syntax \
    mime mime-xdg missing-update-alternatives multilib obsolete-license \
    packages-list patch-fuzz patch-status perllocalpod perm-config perm-line perm-link recipe-naming \
    pkgconfig pkgvarcheck pkgv-undefined pn-overrides shebang-size src-uri-bad symlink-to-sysroot \
    unhandled-features-check unknown-configure-option unlisted-pkg-lics uppercase-pn useless-rpaths \
    virtual-slash xorg-driver-abi"

# Elect whether a given type of error is a warning or an error; they may
# have been set by other files.
WARN_QA ?= "32bit-time native-last pep517-backend"
ERROR_QA ?= "\
    already-stripped arch buildpaths build-deps debug-deps dev-deps dev-elf dev-so empty-dirs file-rdeps \
    ldflags libdir missing-ptest rpaths staticdev textrel version-going-backwards \
    ${CHECKLAYER_REQUIRED_TESTS}"

# Add the usrmerge QA check based on the distro feature
ERROR_QA:append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
WARN_QA:append:layer-core = " missing-metadata missing-maintainer"

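# Illustrative example (hypothetical recipe lines, not part of this class):
# individual checks can be skipped per package with INSANE_SKIP, e.g. to
# silence the already-stripped test for the main package:
#
#   INSANE_SKIP:${PN} += "already-stripped"
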
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
enabled tests are listed here, the do_package_qa task will run under fakeroot."

UNKNOWN_CONFIGURE_OPT_IGNORE ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --disable-static"

# This is a list of directories that are expected to be empty.
QA_EMPTY_DIRS ?= " \
    /dev/pts \
    /media \
    /proc \
    /run \
    /tmp \
    ${localstatedir}/run \
    ${localstatedir}/volatile \
"
# It is possible to specify why a directory is expected to be empty by defining
# QA_EMPTY_DIRS_RECOMMENDATION:<path>, which will then be included in the error
# message if the directory is not empty. If it is not specified for a directory,
# then "but it is expected to be empty" will be used.

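# Illustrative example (hypothetical configuration, following the comment
# above):
#
#   QA_EMPTY_DIRS_RECOMMENDATION:/run = "but it is expected to be empty as it is mounted as a tmpfs at runtime"
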
73def package_qa_clean_path(path, d, pkg=None):
74 """
75 Remove redundant paths from the path for display. If pkg isn't set then
76 TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
77 """
78 if pkg:
79 path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
80 return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
81
82QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
83def package_qa_check_shebang_size(path, name, d, elf):
84 global cpath
85
86 if elf or cpath.islink(path) or not cpath.isfile(path):
87 return
88
89 try:
90 with open(path, 'rb') as f:
91 stanza = f.readline(130)
92 except IOError:
93 return
94
95 if stanza.startswith(b'#!'):
96 try:
97 stanza.decode("utf-8")
98 except UnicodeDecodeError:
99 #If it is not a text file, it is not a script
100 return
101
102 if len(stanza) > 129:
103 oe.qa.handle_error("shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d, name)), d)
104 return
105
106QAPATHTEST[libexec] = "package_qa_check_libexec"
107def package_qa_check_libexec(path,name, d, elf):
108
109 # Skip the case where the default is explicitly /usr/libexec
110 libexec = d.getVar('libexecdir')
111 if libexec == "/usr/libexec":
112 return
113
114 if 'libexec' in path.split(os.path.sep):
115 oe.qa.handle_error("libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d, name), libexec), d)
116
117QAPATHTEST[rpaths] = "package_qa_check_rpath"
118def package_qa_check_rpath(file, name, d, elf):
119 """
120 Check for dangerous RPATHs
121 """
122 if not elf:
123 return
124
125 bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
126
127 phdrs = elf.run_objdump("-p", d)
128
129 import re
130 rpath_re = re.compile(r"\s+(?:RPATH|RUNPATH)\s+(.*)")
131 for line in phdrs.split("\n"):
132 m = rpath_re.match(line)
133 if m:
134 rpath = m.group(1)
135 for dir in bad_dirs:
136 if dir in rpath:
137 oe.qa.handle_error("rpaths", "%s: %s contains bad RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath), d)
138
139QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
140def package_qa_check_useless_rpaths(file, name, d, elf):
141 """
142 Check for RPATHs that are useless but not dangerous
143 """
144 def rpath_eq(a, b):
145 return os.path.normpath(a) == os.path.normpath(b)
146
147 if not elf:
148 return
149
150 libdir = d.getVar("libdir")
151 base_libdir = d.getVar("base_libdir")
152
153 phdrs = elf.run_objdump("-p", d)
154
155 import re
156 rpath_re = re.compile(r"\s+(?:RPATH|RUNPATH)\s+(.*)")
157 for line in phdrs.split("\n"):
158 m = rpath_re.match(line)
159 if m:
160 rpath = m.group(1)
161 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
162 # The dynamic linker searches both these places anyway. There is no point in
163 # looking there again.
164 oe.qa.handle_error("useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath), d)
165
166QAPATHTEST[dev-so] = "package_qa_check_dev"
167def package_qa_check_dev(path, name, d, elf):
168 """
169 Check for ".so" library symlinks in non-dev packages
170 """
171 global cpath
172 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and cpath.islink(path):
173 oe.qa.handle_error("dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
174 (name, package_qa_clean_path(path, d, name)), d)
175
176QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
177def package_qa_check_dev_elf(path, name, d, elf):
178 """
179 Check that -dev doesn't contain real shared libraries. The test has to
180 check that the file is not a link and is an ELF object as some recipes
181 install link-time .so files that are linker scripts.
182 """
183 global cpath
184 if name.endswith("-dev") and path.endswith(".so") and not cpath.islink(path) and elf:
185 oe.qa.handle_error("dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
186 (name, package_qa_clean_path(path, d, name)), d)
187
188QAPATHTEST[staticdev] = "package_qa_check_staticdev"
189def package_qa_check_staticdev(path, name, d, elf):
190 """
191 Check for ".a" library in non-staticdev packages
192 There are a number of exceptions to this rule: -pic packages can contain
193 static libraries, _nonshared.a files belong with their -dev packages, and
194 libgcc.a and libgcov.a are skipped in their packages
195 """
196
197 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
198 oe.qa.handle_error("staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
199 (name, package_qa_clean_path(path, d, name)), d)
200
201QAPATHTEST[mime] = "package_qa_check_mime"
202def package_qa_check_mime(path, name, d, elf):
203 """
204 Check if package installs mime types to /usr/share/mime/packages
205 without inheriting mime.bbclass
206 """
207
208 if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
209 oe.qa.handle_error("mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
210 (name, package_qa_clean_path(path, d, name)), d)
211
212QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
213def package_qa_check_mime_xdg(path, name, d, elf):
214 """
215 Check if package installs a desktop file containing MimeType and requires
216 mime-xdg.bbclass to create /usr/share/applications/mimeinfo.cache
217 """
218
219 if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
220 mime_type_found = False
221 try:
222 with open(path, 'r') as f:
223 for line in f.read().split('\n'):
224 if 'MimeType' in line:
225 mime_type_found = True
226 break
227 except:
228 # At least libreoffice installs symlinks with absolute paths that are dangling here.
229 # We could implement some magic, but for the few (one) affected recipes it is not worth the effort, so just warn:
230 wstr = "%s cannot open %s - is it a symlink with an absolute path?\n" % (name, package_qa_clean_path(path, d, name))
231 wstr += "Please check whether the (linked) file contains the key 'MimeType'.\n"
232 pkgname = name
233 if name == d.getVar('PN'):
234 pkgname = '${PN}'
235 wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP:%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
236 oe.qa.handle_error("mime-xdg", wstr, d)
237 if mime_type_found:
238 oe.qa.handle_error("mime-xdg", "%s: contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s" % \
239 (name, package_qa_clean_path(path, d, name)), d)
240
241def package_qa_check_libdir(d):
242 """
243 Check for wrong library installation paths. For instance, catch
244 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
245 installing in /usr/lib64 when ${libdir}="/usr/lib"
246 """
247 import re
248
249 pkgdest = d.getVar('PKGDEST')
250 base_libdir = d.getVar("base_libdir") + os.sep
251 libdir = d.getVar("libdir") + os.sep
252 libexecdir = d.getVar("libexecdir") + os.sep
253 exec_prefix = d.getVar("exec_prefix") + os.sep
254
255 messages = []
256
257 # The regexes are purposely fuzzy, as there are some .so.x.y.z files
258 # that don't follow the standard naming convention. The code checks later
259 # that they are actual ELF files
260 lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
261 exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
262
263 for root, dirs, files in os.walk(pkgdest):
264 if root == pkgdest:
265 # Skip subdirectories for any packages with libdir in INSANE_SKIP
266 skippackages = []
267 for package in dirs:
268 if 'libdir' in (d.getVar('INSANE_SKIP:' + package) or "").split():
269 bb.note("Package %s skipping libdir QA test" % (package))
270 skippackages.append(package)
271 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
272 bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
273 skippackages.append(package)
274 for package in skippackages:
275 dirs.remove(package)
276 for file in files:
277 full_path = os.path.join(root, file)
278 rel_path = os.path.relpath(full_path, pkgdest)
279 if os.sep in rel_path:
280 package, rel_path = rel_path.split(os.sep, 1)
281 rel_path = os.sep + rel_path
282 if lib_re.match(rel_path):
283 if base_libdir not in rel_path:
284 # make sure it's an actual ELF file
285 elf = oe.qa.ELFFile(full_path)
286 try:
287 elf.open()
288 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
289 except (oe.qa.NotELFFileError, FileNotFoundError):
290 pass
291 if exec_re.match(rel_path):
292 if libdir not in rel_path and libexecdir not in rel_path:
293 # make sure it's an actual ELF file
294 elf = oe.qa.ELFFile(full_path)
295 try:
296 elf.open()
297 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
298 except (oe.qa.NotELFFileError, FileNotFoundError):
299 pass
300
301 if messages:
302 oe.qa.handle_error("libdir", "\n".join(messages), d)
303
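# A reduced sketch of the path classification above with hypothetical values:
# a shared library under /lib is mis-placed when base_libdir is e.g. "lib64",
# and would be reported once confirmed to be a real ELF file.
def _example_wrong_libdir(rel_path, base_libdir="/lib64/"):
    import re
    lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
    return bool(lib_re.match(rel_path)) and base_libdir not in rel_path

# _example_wrong_libdir("/lib/libfoo.so.1") evaluates to True.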
304QAPATHTEST[debug-files] = "package_qa_check_dbg"
305def package_qa_check_dbg(path, name, d, elf):
306 """
307 Check for ".debug" files or directories outside of the dbg package
308 """
309
310 if not "-dbg" in name and not "-ptest" in name:
311 if '.debug' in path.split(os.path.sep):
312 oe.qa.handle_error("debug-files", "%s: non debug package contains .debug directory %s" % \
313 (name, package_qa_clean_path(path, d, name)), d)
314
315QAPATHTEST[arch] = "package_qa_check_arch"
316def package_qa_check_arch(path, name, d, elf):
317 """
318 Check if archs are compatible
319 """
320 import re, oe.elf
321
322 if not elf:
323 return
324
325 host_os = d.getVar('HOST_OS')
326 host_arch = d.getVar('HOST_ARCH')
327 provides = d.getVar('PROVIDES')
328
329 if host_arch == "allarch":
330 oe.qa.handle_error("arch", "%s: inherits the allarch class, but has architecture-specific binaries %s" % \
331 (name, package_qa_clean_path(path, d, name)), d)
332 return
333
334 # If this throws an exception, the machine_dict needs expanding
335 (expected_machine, expected_osabi, expected_abiversion, expected_littleendian, expected_bits) \
336 = oe.elf.machine_dict(d)[host_os][host_arch]
337
338 actual_machine = elf.machine()
339 actual_bits = elf.abiSize()
340 actual_littleendian = elf.isLittleEndian()
341
342 # BPF binaries don't match the target architecture
343 if oe.qa.elf_machine_to_string(actual_machine) == "BPF":
344 return
345
346 # These targets have 32-bit userspace but 64-bit kernel, so fudge the expected values
347 if (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and (host_os in ("linux-gnux32", "linux-muslx32", "linux-gnu_ilp32") or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE'))):
348 expected_bits = 64
349
350 # Check the architecture and endianness of the binary
351 if expected_machine != actual_machine:
352 oe.qa.handle_error("arch", "Architecture did not match (%s, expected %s) in %s" % \
353 (oe.qa.elf_machine_to_string(actual_machine), oe.qa.elf_machine_to_string(expected_machine), package_qa_clean_path(path, d, name)), d)
354
355 if expected_bits != actual_bits:
356 oe.qa.handle_error("arch", "Bit size did not match (%d, expected %d) in %s" % \
357 (actual_bits, expected_bits, package_qa_clean_path(path, d, name)), d)
358
359 if expected_littleendian != actual_littleendian:
360 oe.qa.handle_error("arch", "Endiannes did not match (%d, expected %d) in %s" % \
361 (actual_littleendian, expected_littleendian, package_qa_clean_path(path, d, name)), d)
362package_qa_check_arch[vardepsexclude] = "DEFAULTTUNE"
363
364QAPATHTEST[desktop] = "package_qa_check_desktop"
365def package_qa_check_desktop(path, name, d, elf):
366 """
367 Run all desktop files through desktop-file-validate.
368 """
369 if path.endswith(".desktop"):
370 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
371 output = os.popen("%s %s" % (desktop_file_validate, path))
372 # This only produces output on errors
373 for l in output:
374 oe.qa.handle_error("desktop", "Desktop file issue: " + l.strip(), d)
375
376QAPATHTEST[textrel] = "package_qa_textrel"
377def package_qa_textrel(path, name, d, elf):
378 """
379 Check if the binary contains relocations in .text
380 """
381
382 if not elf:
383 return
384
385 phdrs = elf.run_objdump("-p", d)
386
387 import re
388 textrel_re = re.compile(r"\s+TEXTREL\s+")
389 for line in phdrs.split("\n"):
390 if textrel_re.match(line):
391 path = package_qa_clean_path(path, d, name)
392 oe.qa.handle_error("textrel", "%s: ELF binary %s has relocations in .text" % (name, path), d)
393 return
394
395QAPATHTEST[ldflags] = "package_qa_hash_style"
396def package_qa_hash_style(path, name, d, elf):
397 """
398 Check if the binary has the right hash style...
399 """
400
401 if not elf:
402 return
403
404 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
405 if not gnu_hash:
406 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
407 if not gnu_hash:
408 return
409
410 sane = False
411 has_syms = False
412
413 phdrs = elf.run_objdump("-p", d)
414
415 # If this binary has symbols, we expect it to have GNU_HASH too.
416 for line in phdrs.split("\n"):
417 if "SYMTAB" in line:
418 has_syms = True
419 if "GNU_HASH" in line or "MIPS_XHASH" in line:
420 sane = True
421 if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
422 sane = True
423 if has_syms and not sane:
424 path = package_qa_clean_path(path, d, name)
425 oe.qa.handle_error("ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name), d)
426package_qa_hash_style[vardepsexclude] = "TCLIBC"
427
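# Sketch of the GNU_HASH heuristic above, runnable against hypothetical
# "objdump -p" output: a binary that has a SYMTAB entry but neither GNU_HASH
# nor MIPS_XHASH is what the ldflags QA error reports.
def _example_missing_gnu_hash(objdump_p_output):
    lines = objdump_p_output.split("\n")
    has_syms = any("SYMTAB" in line for line in lines)
    sane = any("GNU_HASH" in line or "MIPS_XHASH" in line for line in lines)
    return has_syms and not sane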
428
429QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
430def package_qa_check_buildpaths(path, name, d, elf):
431 """
432 Check for build paths inside target files and error if paths are not
433 explicitly ignored.
434 """
435 import stat
436 # Ignore symlinks/devs/fifos
437 mode = os.lstat(path).st_mode
438 if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
439 return
440
441 tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
442 with open(path, 'rb') as f:
443 file_content = f.read()
444 if tmpdir in file_content:
445 path = package_qa_clean_path(path, d, name)
446 oe.qa.handle_error("buildpaths", "File %s in package %s contains reference to TMPDIR" % (path, name), d)
447
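# Sketch of the buildpaths scan above: a plain byte search for the build tree
# path inside a packaged file (the tmpdir value here is hypothetical).
def _example_contains_buildpath(filename, tmpdir=b"/home/user/build/tmp"):
    with open(filename, 'rb') as f:
        return tmpdir in f.read()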
448
449QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
450def package_qa_check_xorg_driver_abi(path, name, d, elf):
451 """
452 Check that all packages containing Xorg drivers have ABI dependencies
453 """
454
455 # Skip dev, dbg or nativesdk packages
456 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
457 return
458
459 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
460 if driverdir in path and path.endswith(".so"):
461 mlprefix = d.getVar('MLPREFIX') or ''
462 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + name) or ""):
463 if rdep.startswith("%sxorg-abi-" % mlprefix):
464 return
465 oe.qa.handle_error("xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)), d)
466
467QAPATHTEST[infodir] = "package_qa_check_infodir"
468def package_qa_check_infodir(path, name, d, elf):
469 """
470 Check that /usr/share/info/dir isn't shipped in a particular package
471 """
472 infodir = d.expand("${infodir}/dir")
473
474 if infodir in path:
475 oe.qa.handle_error("infodir", "The %s file is not meant to be shipped in a particular package." % infodir, d)
476
477QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
478def package_qa_check_symlink_to_sysroot(path, name, d, elf):
479 """
480 Check that the package doesn't contain any absolute symlinks to the sysroot.
481 """
482 global cpath
483 if cpath.islink(path):
484 target = os.readlink(path)
485 if os.path.isabs(target):
486 tmpdir = d.getVar('TMPDIR')
487 if target.startswith(tmpdir):
488 path = package_qa_clean_path(path, d, name)
489 oe.qa.handle_error("symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (path, name), d)
490
491QAPATHTEST[32bit-time] = "check_32bit_symbols"
492def check_32bit_symbols(path, packagename, d, elf):
493 """
494 Check that ELF files do not use any 32 bit time APIs from glibc.
495 """
496 thirtytwo_bit_time_archs = {'arm', 'armeb', 'mipsarcho32', 'powerpc', 'x86'}
497 overrides = set(d.getVar('OVERRIDES').split(':'))
498 if not (thirtytwo_bit_time_archs & overrides):
499 return
500
501 import re
502 # This list is manually constructed by searching the image folder of the
503 # glibc recipe for __USE_TIME_BITS64. There is no good way to do this
504 # automatically.
505 api32 = {
506 # /usr/include/time.h
507 "clock_getres", "clock_gettime", "clock_nanosleep", "clock_settime",
508 "ctime", "ctime_r", "difftime", "gmtime", "gmtime_r", "localtime",
509 "localtime_r", "mktime", "nanosleep", "time", "timegm", "timelocal",
510 "timer_gettime", "timer_settime", "timespec_get", "timespec_getres",
511 # /usr/include/bits/time.h
512 "clock_adjtime",
513 # /usr/include/signal.h
514 "sigtimedwait",
515 # /usr/include/sys/time.h
516 "adjtime",
517 "futimes", "futimesat", "getitimer", "gettimeofday", "lutimes",
518 "setitimer", "settimeofday", "utimes",
519 # /usr/include/sys/timex.h
520 "adjtimex", "ntp_adjtime", "ntp_gettime", "ntp_gettimex",
521 # /usr/include/sys/wait.h
522 "wait3", "wait4",
523 # /usr/include/sys/stat.h
524 "fstat", "fstat64", "fstatat", "fstatat64", "futimens", "lstat",
525 "lstat64", "stat", "stat64", "utimensat",
526 # /usr/include/sys/poll.h
527 "ppoll",
528 # /usr/include/sys/resource.h
529 "getrusage",
530 # /usr/include/sys/ioctl.h
531 "ioctl",
532 # /usr/include/sys/select.h
533 "select", "pselect",
534 # /usr/include/sys/prctl.h
535 "prctl",
536 # /usr/include/sys/epoll.h
537 "epoll_pwait2",
538 # /usr/include/sys/timerfd.h
539 "timerfd_gettime", "timerfd_settime",
540 # /usr/include/sys/socket.h
541 "getsockopt", "recvmmsg", "recvmsg", "sendmmsg", "sendmsg",
542 "setsockopt",
543 # /usr/include/sys/msg.h
544 "msgctl",
545 # /usr/include/sys/sem.h
546 "semctl", "semtimedop",
547 # /usr/include/sys/shm.h
548 "shmctl",
549 # /usr/include/pthread.h
550 "pthread_clockjoin_np", "pthread_cond_clockwait",
551 "pthread_cond_timedwait", "pthread_mutex_clocklock",
552 "pthread_mutex_timedlock", "pthread_rwlock_clockrdlock",
553 "pthread_rwlock_clockwrlock", "pthread_rwlock_timedrdlock",
554 "pthread_rwlock_timedwrlock", "pthread_timedjoin_np",
555 # /usr/include/semaphore.h
556 "sem_clockwait", "sem_timedwait",
557 # /usr/include/threads.h
558 "cnd_timedwait", "mtx_timedlock", "thrd_sleep",
559 # /usr/include/aio.h
560 "aio_cancel", "aio_error", "aio_read", "aio_return", "aio_suspend",
561 "aio_write", "lio_listio",
562 # /usr/include/mqueue.h
563 "mq_timedreceive", "mq_timedsend",
564 # /usr/include/glob.h
565 "glob", "glob64", "globfree", "globfree64",
566 # /usr/include/sched.h
567 "sched_rr_get_interval",
568 # /usr/include/fcntl.h
569 "fcntl", "fcntl64",
570 # /usr/include/utime.h
571 "utime",
572 # /usr/include/ftw.h
573 "ftw", "ftw64", "nftw", "nftw64",
574 # /usr/include/fts.h
575 "fts64_children", "fts64_close", "fts64_open", "fts64_read",
576 "fts64_set", "fts_children", "fts_close", "fts_open", "fts_read",
577 "fts_set",
578 # /usr/include/netdb.h
579 "gai_suspend",
580 }
581
582 ptrn = re.compile(
583 r'''
584 (?P<value>[\da-fA-F]+) \s+
585 (?P<flags>[lgu! ][w ][C ][W ][Ii ][dD ]F) \s+
586 (?P<section>\*UND\*) \s+
587 (?P<alignment>(?P<size>[\da-fA-F]+)) \s+
588 (?P<symbol>
589 ''' +
590 r'(?P<notag>' + r'|'.join(sorted(api32)) + r')' +
591 r'''
592 (@+(?P<tag>GLIBC_\d+\.\d+\S*)))
593 ''', re.VERBOSE
594 )
595
596 # elf is a oe.qa.ELFFile object
597 if elf:
598 phdrs = elf.run_objdump("-tw", d)
599 syms = re.finditer(ptrn, phdrs)
600 usedapis = {sym.group('notag') for sym in syms}
601 if usedapis:
602 elfpath = package_qa_clean_path(path, d, packagename)
603 # Remove any .debug dir, heuristic that probably works
604 # At this point, any symbol information is stripped into the debug
605 # package, so that is the only place we will find them.
606 elfpath = elfpath.replace('.debug/', '')
607 allowed = "32bit-time" in (d.getVar('INSANE_SKIP') or '').split()
608 if not allowed:
609 msgformat = elfpath + " uses 32-bit api '%s'"
610 for sym in usedapis:
611 oe.qa.handle_error('32bit-time', msgformat % sym, d)
612 oe.qa.handle_error('32bit-time', 'Suppress with INSANE_SKIP = "32bit-time"', d)
613check_32bit_symbols[vardepsexclude] = "OVERRIDES"
614
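# A simplified, runnable sketch of the symbol scan above; the real pattern is
# stricter about the objdump flag and size columns, and the symbol list is
# shortened here to three entries for illustration.
def _example_uses_32bit_time(objdump_tw_line):
    import re
    ptrn = re.compile(r'\*UND\*.*\s(?:clock_gettime|gettimeofday|stat)@+GLIBC_\d+\.\d+')
    return bool(ptrn.search(objdump_tw_line))

# _example_uses_32bit_time("0000 w DF *UND* 0000 clock_gettime@GLIBC_2.4")
# returns True, i.e. the binary references a 32-bit time API from glibc.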
615# Check license variables
616do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
617python populate_lic_qa_checksum() {
618 """
619 Check for changes in the license files.
620 """
621
622 lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
623 lic = d.getVar('LICENSE')
624 pn = d.getVar('PN')
625
626 if lic == "CLOSED":
627 return
628
629 if not lic_files and d.getVar('SRC_URI'):
630 oe.qa.handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
631
632 srcdir = d.getVar('S')
633 corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
634 for url in lic_files.split():
635 try:
636 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
637 except bb.fetch.MalformedUrl:
638 oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
639 continue
640 srclicfile = os.path.join(srcdir, path)
641 if not os.path.isfile(srclicfile):
642 oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
643 continue
644
645 if (srclicfile == corebase_licensefile):
646 bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for a MIT License file in LIC_FILES_CHKSUM. This will become an error in the future")
647
648 recipemd5 = parm.get('md5', '')
649 beginline, endline = 0, 0
650 if 'beginline' in parm:
651 beginline = int(parm['beginline'])
652 if 'endline' in parm:
653 endline = int(parm['endline'])
654
655 if (not beginline) and (not endline):
656 md5chksum = bb.utils.md5_file(srclicfile)
657 with open(srclicfile, 'r', errors='replace') as f:
658 license = f.read().splitlines()
659 else:
660 with open(srclicfile, 'rb') as f:
661 import hashlib
662 lineno = 0
663 license = []
664 try:
665 m = hashlib.new('MD5', usedforsecurity=False)
666 except TypeError:
667 m = hashlib.new('MD5')
668 for line in f:
669 lineno += 1
670 if (lineno >= beginline):
671 if ((lineno <= endline) or not endline):
672 m.update(line)
673 license.append(line.decode('utf-8', errors='replace').rstrip())
674 else:
675 break
676 md5chksum = m.hexdigest()
677 if recipemd5 == md5chksum:
678 bb.note(pn + ": md5 checksum matched for " + url)
679 else:
680 if recipemd5:
681 msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
682 msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
683 max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
684 if not license or license[-1] != '':
685 # Ensure that our license text ends with a line break
686 # (will be added with join() below).
687 license.append('')
688 remove = len(license) - max_lines
689 if remove > 0:
690 start = max_lines // 2
691 end = start + remove - 1
692 del license[start:end]
693 license.insert(start, '...')
694 msg = msg + "\n" + pn + ": Here is the selected license text:" + \
695 "\n" + \
696 "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
697 "\n" + "\n".join(license) + \
698 "{:^^70}".format(" endline=%d " % endline if endline else "")
699 if beginline:
700 if endline:
701 srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
702 else:
703 srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
704 elif endline:
705 srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
706 else:
707 srcfiledesc = srclicfile
708 msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
709
710 else:
711 msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
712 msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
713 oe.qa.handle_error("license-checksum", msg, d)
714
715 oe.qa.exit_if_errors(d)
716}
717
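# A compact sketch of the beginline/endline hashing above, with hypothetical
# arguments: only the selected line range of the license file contributes to
# the checksum (the class also passes usedforsecurity=False where supported).
def _example_license_md5(licfile, beginline=0, endline=0):
    import hashlib
    m = hashlib.md5()
    with open(licfile, 'rb') as f:
        for lineno, line in enumerate(f, start=1):
            if lineno >= beginline and (not endline or lineno <= endline):
                m.update(line)
    return m.hexdigest()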
718def qa_check_staged(path, d):
719 """
720 Check staged la and pc files for common problems like references to the work
721 directory.
722
723 As this is run after every stage we should be able to find the one
724 responsible for the errors easily even if we look at every .pc and .la file.
725 """
726
727 tmpdir = d.getVar('TMPDIR')
728 workdir = os.path.join(tmpdir, "work")
729 recipesysroot = d.getVar("RECIPE_SYSROOT")
730
731 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
732 pkgconfigcheck = workdir
733 else:
734 pkgconfigcheck = tmpdir
735
736 skip = (d.getVar('INSANE_SKIP') or "").split()
737 skip_la = False
738 if 'la' in skip:
739 bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
740 skip_la = True
741
742 skip_pkgconfig = False
743 if 'pkgconfig' in skip:
744 bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
745 skip_pkgconfig = True
746
747 skip_shebang_size = False
748 if 'shebang-size' in skip:
749 bb.note("Recipe %s skipping qa checkking: shebang-size" % d.getVar('PN'))
750 skip_shebang_size = True
751
752 # find all .la and .pc files
753 # read the content
754 # and check for stuff that looks wrong
755 for root, dirs, files in os.walk(path):
756 for file in files:
757 path = os.path.join(root,file)
758 if file.endswith(".la") and not skip_la:
759 with open(path) as f:
760 file_content = f.read()
761 file_content = file_content.replace(recipesysroot, "")
762 if workdir in file_content:
763 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
764 oe.qa.handle_error("la", error_msg, d)
765 elif file.endswith(".pc") and not skip_pkgconfig:
766 with open(path) as f:
767 file_content = f.read()
768 file_content = file_content.replace(recipesysroot, "")
769 if pkgconfigcheck in file_content:
770 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
771 oe.qa.handle_error("pkgconfig", error_msg, d)
772
773 if not skip_shebang_size:
774 global cpath
775 cpath = oe.cachedpath.CachedPath()
776 package_qa_check_shebang_size(path, "", d, None)
777 cpath = None
778
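# Sketch of the staged-file test above with hypothetical paths: a .la or .pc
# file fails if it still references the work directory once the recipe
# sysroot prefix has been stripped out.
def _example_staged_file_bad(filename, recipesysroot, workdir):
    with open(filename) as f:
        content = f.read().replace(recipesysroot, "")
    return workdir in content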
779# Walk over all files in a directory and call func
780def package_qa_walk(checkfuncs, package, d):
781 global cpath
782
783 elves = {}
784 for path in pkgfiles[package]:
785 elf = None
786 if cpath.isfile(path) and not cpath.islink(path):
787 elf = oe.qa.ELFFile(path)
788 try:
789 elf.open()
790 elf.close()
791 except oe.qa.NotELFFileError:
792 elf = None
793 if elf:
794 elves[path] = elf
795
796 def prepopulate_objdump_p(elf, d):
797 output = elf.run_objdump("-p", d)
798 return (elf.name, output)
799
800 results = oe.utils.multiprocess_launch(prepopulate_objdump_p, elves.values(), d, extraargs=(d,))
801 for item in results:
802 elves[item[0]].set_objdump("-p", item[1])
803
804 for path in pkgfiles[package]:
805 elf = elves.get(path)
806 if elf:
807 elf.open()
808 for func in checkfuncs:
809 func(path, package, d, elf)
810 if elf:
811 elf.close()
812
813def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
814 # Don't do this check for kernel/module recipes, there aren't too many debug/development
815 # packages and you can get false positives e.g. on kernel-module-lirc-dev
816 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
817 return
818
819 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
820 localdata = bb.data.createCopy(d)
821 localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
822
823 # Now check the RDEPENDS
824 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
825
826 # Now do the sanity check!!!
827 if "build-deps" not in skip:
828 def check_rdep(rdep_data, possible_pn):
829 if rdep_data and "PN" in rdep_data:
830 possible_pn.add(rdep_data["PN"])
831 return rdep_data["PN"] in taskdeps
832 return False
833
834 for rdepend in rdepends:
835 if rdepend.endswith("-dbg") and "debug-deps" not in skip:
836 error_msg = "%s rdepends on %s" % (pkg,rdepend)
837 oe.qa.handle_error("debug-deps", error_msg, d)
838 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
839 error_msg = "%s rdepends on %s" % (pkg, rdepend)
840 oe.qa.handle_error("dev-deps", error_msg, d)
841 if rdepend not in packages:
842 possible_pn = set()
843 rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
844 if check_rdep(rdep_data, possible_pn):
845 continue
846
847 if any(check_rdep(rdep_data, possible_pn) for _, rdep_data in oe.packagedata.foreach_runtime_provider_pkgdata(d, rdepend)):
848 continue
849
850 if possible_pn:
851 error_msg = "%s rdepends on %s, but it isn't a build dependency, missing one of %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, ", ".join(possible_pn))
852 else:
853 error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
854 oe.qa.handle_error("build-deps", error_msg, d)
855
856 if "file-rdeps" not in skip:
857 ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
858 if bb.utils.contains('DISTRO_FEATURES', 'usrmerge', True, False, d):
859 ignored_file_rdeps |= set(['/usr/bin/sh'])
860 if bb.data.inherits_class('nativesdk', d):
861 ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
862 if bb.utils.contains('DISTRO_FEATURES', 'usrmerge', True, False, d):
863 ignored_file_rdeps |= set(['/usr/bin/bash'])
864 # For saving the FILERDEPENDS
865 filerdepends = {}
866 rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
867 for key in rdep_data:
868 if key.startswith("FILERDEPENDS:"):
869 for subkey in bb.utils.explode_deps(rdep_data[key]):
870 if subkey not in ignored_file_rdeps and \
871 not subkey.startswith('perl('):
872 # We already know it starts with FILERDEPENDS:
873 filerdepends[subkey] = key[13:]
874
875 if filerdepends:
876 done = rdepends[:]
877 # Add the rprovides of itself
878 if pkg not in done:
879 done.insert(0, pkg)
880
881 # python itself is not a package, but python-core provides it, so
882 # skip checking /usr/bin/python if python is in the rdeps, in
883 # case there is a RDEPENDS:pkg = "python" in the recipe.
884 for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
885 if py in done:
886 filerdepends.pop("/usr/bin/python",None)
887 done.remove(py)
888 for rdep in done:
889 # The file dependencies may contain package names, e.g.,
890 # perl
891 filerdepends.pop(rdep,None)
892
893 for _, rdep_data in oe.packagedata.foreach_runtime_provider_pkgdata(d, rdep, True):
894 for key in rdep_data:
895 if key.startswith("FILERPROVIDES:") or key.startswith("RPROVIDES:"):
896 for subkey in bb.utils.explode_deps(rdep_data[key]):
897 filerdepends.pop(subkey,None)
898 # Add the files list to the rprovides
899 if key.startswith("FILES_INFO:"):
900 # Use eval() to turn it into a dict
901 for subkey in eval(rdep_data[key]):
902 filerdepends.pop(subkey,None)
903
904 if not filerdepends:
905 # Break if all the file rdepends are met
906 break
907 if filerdepends:
908 for key in filerdepends:
909 error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS:%s?" % \
910 (filerdepends[key].replace(":%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
911 oe.qa.handle_error("file-rdeps", error_msg, d)
912package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
913
914def package_qa_check_deps(pkg, pkgdest, d):
915
916 localdata = bb.data.createCopy(d)
917 localdata.setVar('OVERRIDES', pkg)
918
919 def check_valid_deps(var):
920 try:
921 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
922 except ValueError as e:
923 bb.fatal("%s:%s: %s" % (var, pkg, e))
924 for dep in rvar:
925 for v in rvar[dep]:
926 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>= ')):
927 error_msg = "%s:%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
928 oe.qa.handle_error("dep-cmp", error_msg, d)
929
930 check_valid_deps('RDEPENDS')
931 check_valid_deps('RRECOMMENDS')
932 check_valid_deps('RSUGGESTS')
933 check_valid_deps('RPROVIDES')
934 check_valid_deps('RREPLACES')
935 check_valid_deps('RCONFLICTS')
936
937QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
938def package_qa_check_usrmerge(pkg, d):
939 global cpath
940 pkgdest = d.getVar('PKGDEST')
941 pkg_dir = pkgdest + os.sep + pkg + os.sep
942 merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
943 for f in merged_dirs:
944 if cpath.exists(pkg_dir + f) and not cpath.islink(pkg_dir + f):
945 msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
946 oe.qa.handle_error("usrmerge", msg, d)
947 return
948
949QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
950def package_qa_check_perllocalpod(pkg, d):
951 """
952 Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
953 installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
954 handle this for most recipes.
955 """
956 import glob
957 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
958 podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
959
960 matches = glob.glob(podpath)
961 if matches:
962 matches = [package_qa_clean_path(path, d, pkg) for path in matches]
963 msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
964 oe.qa.handle_error("perllocalpod", msg, d)
965
966QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
967def package_qa_check_expanded_d(package, d):
968 """
969 Check for the expanded D (${D}) value in pkg_* and FILES
970 variables, warn the user to use it correctly.
971 """
972 expanded_d = d.getVar('D')
973
974 for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
975 bbvar = d.getVar(var + ":" + package) or ""
976 if expanded_d in bbvar:
977 if var == 'FILES':
978 oe.qa.handle_error("expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package, d)
979 else:
980 oe.qa.handle_error("expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package), d)
981
982QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
983def package_qa_check_unlisted_pkg_lics(package, d):
984 """
985 Check that all licenses for a package are among the licenses for the recipe.
986 """
987 pkg_lics = d.getVar('LICENSE:' + package)
988 if not pkg_lics:
989 return
990
991 recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
992 package_lics = oe.license.list_licenses(pkg_lics)
993 unlisted = package_lics - recipe_lics_set
994 if unlisted:
995 oe.qa.handle_error("unlisted-pkg-lics",
996 "LICENSE:%s includes licenses (%s) that are not "
997 "listed in LICENSE" % (package, ' '.join(unlisted)), d)
998 obsolete = set(oe.license.obsolete_license_list()) & package_lics - recipe_lics_set
999 if obsolete:
1000 oe.qa.handle_error("obsolete-license",
1001 "LICENSE:%s includes obsolete licenses %s" % (package, ' '.join(obsolete)), d)
1002
1003QAPKGTEST[empty-dirs] = "package_qa_check_empty_dirs"
1004def package_qa_check_empty_dirs(pkg, d):
1005 """
1006 Check for the existence of files in directories that are expected to be
1007 empty.
1008 """
1009
1010 global cpath
1011 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
1012 for dir in (d.getVar('QA_EMPTY_DIRS') or "").split():
1013 empty_dir = oe.path.join(pkgd, dir)
1014 if cpath.exists(empty_dir) and os.listdir(empty_dir):
1015 recommendation = (d.getVar('QA_EMPTY_DIRS_RECOMMENDATION:' + dir) or
1016 "but it is expected to be empty")
1017 msg = "%s installs files in %s, %s" % (pkg, dir, recommendation)
1018 oe.qa.handle_error("empty-dirs", msg, d)
1019
1020def package_qa_check_encoding(keys, encode, d):
1021 def check_encoding(key, enc):
1022 sane = True
1023 value = d.getVar(key)
1024 if value:
1025 try:
1026 value.encode(enc)
1027 except UnicodeError:
1028 error_msg = "%s has non-%s characters" % (key, enc)
1029 sane = False
1030 oe.qa.handle_error("invalid-chars", error_msg, d)
1031 return sane
1032
1033 for key in keys:
1034 sane = check_encoding(key, encode)
1035 if not sane:
1036 break
1037
1038HOST_USER_UID := "${@os.getuid()}"
1039HOST_USER_GID := "${@os.getgid()}"
1040
1041QAPATHTEST[host-user-contaminated] = "package_qa_check_host_user"
1042def package_qa_check_host_user(path, name, d, elf):
1043 """Check for paths outside of /home which are owned by the user running bitbake."""
1044 global cpath
1045
1046 if not cpath.lexists(path):
1047 return
1048
1049 dest = d.getVar('PKGDEST')
1050 pn = d.getVar('PN')
1051 home = os.path.join(dest, name, 'home')
1052 if path == home or path.startswith(home + os.sep):
1053 return
1054
1055 try:
1056 stat = os.lstat(path)
1057 except OSError as exc:
1058 import errno
1059 if exc.errno != errno.ENOENT:
1060 raise
1061 else:
1062 check_uid = int(d.getVar('HOST_USER_UID'))
1063 if stat.st_uid == check_uid:
1064 oe.qa.handle_error("host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid), d)
1065
1066 check_gid = int(d.getVar('HOST_USER_GID'))
1067 if stat.st_gid == check_gid:
1068 oe.qa.handle_error("host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid), d)
1069package_qa_check_host_user[vardepsexclude] = "HOST_USER_UID HOST_USER_GID"
1070
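# Sketch of the ownership comparison above: a packaged file owned by the
# uid/gid that bitbake runs as (captured at parse time into HOST_USER_UID
# and HOST_USER_GID) suggests host contamination.
def _example_host_contaminated(path, check_uid, check_gid):
    import os
    st = os.lstat(path)
    return st.st_uid == check_uid or st.st_gid == check_gid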
1071QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
1072def package_qa_check_unhandled_features_check(pn, d):
1073 if not bb.data.inherits_class('features_check', d):
1074 var_set = False
1075 for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
1076 for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
1077 if d.getVar(var) is not None or d.hasOverrides(var):
1078 var_set = True
1079 if var_set:
1080 oe.qa.handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
1081
1082QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
1083def package_qa_check_missing_update_alternatives(pn, d):
1084 # Look at all packages and find out if any of them sets the ALTERNATIVE variable
1085 # without inheriting the update-alternatives class
1086 for pkg in (d.getVar('PACKAGES') or '').split():
1087 if d.getVar('ALTERNATIVE:%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
1088 oe.qa.handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE:%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
1089
1090def parse_test_matrix(matrix_name, skip, d):
1091 testmatrix = d.getVarFlags(matrix_name) or {}
1092 g = globals()
1093 checks = []
1094 for w in (d.getVar("WARN_QA") or "").split():
1095 if w in skip:
1096 continue
1097 if w in testmatrix and testmatrix[w] in g:
1098 checks.append(g[testmatrix[w]])
1099
1100 for e in (d.getVar("ERROR_QA") or "").split():
1101 if e in skip:
1102 continue
1103 if e in testmatrix and testmatrix[e] in g:
1104 checks.append(g[testmatrix[e]])
1105 return checks
1106parse_test_matrix[vardepsexclude] = "ERROR_QA WARN_QA"
1107
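# A reduced sketch of the matrix lookup above, with a plain dict standing in
# for the bitbake varflags (all names here are hypothetical): enabled QA test
# names resolve to check functions, honouring the skip list.
def _example_resolve_checks(warn_qa, error_qa, skip, testmatrix, funcs):
    checks = []
    for test in warn_qa.split() + error_qa.split():
        if test not in skip and test in testmatrix and testmatrix[test] in funcs:
            checks.append(funcs[testmatrix[test]])
    return checks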
1108
1109# The PACKAGE FUNC to scan each package
1110python do_package_qa () {
1111 import oe.packagedata
1112
1113 # Check for obsolete license references in main LICENSE (packages are checked below for any changes)
1114 main_licenses = oe.license.list_licenses(d.getVar('LICENSE'))
1115 obsolete = set(oe.license.obsolete_license_list()) & main_licenses
1116 if obsolete:
1117 oe.qa.handle_error("obsolete-license", "Recipe LICENSE includes obsolete licenses %s" % ' '.join(obsolete), d)
1118
1119 bb.build.exec_func("read_subpackage_metadata", d)
1120
1121 # Check for non UTF-8 characters in the recipe's metadata
1122 package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
1123
1124 logdir = d.getVar('T')
1125 pn = d.getVar('PN')
1126
1127 # Scan the packages...
1128 packages = set((d.getVar('PACKAGES') or '').split())
1129 # nothing to scan if there are no packages
1130 if not packages:
1131 return
1132
1133 global pkgfiles, cpath
1134 pkgfiles = {}
1135 cpath = oe.cachedpath.CachedPath()
1136 pkgdest = d.getVar('PKGDEST')
1137 for pkg in packages:
1138 pkgdir = os.path.join(pkgdest, pkg)
1139 pkgfiles[pkg] = []
1140 for walkroot, dirs, files in os.walk(pkgdir):
1141 # Don't walk into top-level CONTROL or DEBIAN directories as these
1142 # are temporary directories created by do_package.
1143 if walkroot == pkgdir:
1144 for removedir in ("CONTROL", "DEBIAN"):
1145 try:
1146 dirs.remove(removedir)
1147 except ValueError:
1148 pass
1149 pkgfiles[pkg].extend((os.path.join(walkroot, f) for f in files))
1150
1151 import re
1152 # Package names must match the [a-z0-9.+-]+ regular expression
1153 pkgname_pattern = re.compile(r"^[a-z0-9.+-]+$")
1154
1155 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
1156 taskdeps = set()
1157 for dep in taskdepdata:
1158 taskdeps.add(taskdepdata[dep][0])
1159
1160 for package in packages:
1161 skip = set((d.getVar('INSANE_SKIP') or "").split() +
1162 (d.getVar('INSANE_SKIP:' + package) or "").split())
1163 if skip:
1164 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
1165
1166 bb.note("Checking Package: %s" % package)
1167 # Check package name
1168 if not pkgname_pattern.match(package):
1169 oe.qa.handle_error("pkgname",
1170 "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
1171
1172 checks = parse_test_matrix("QAPATHTEST", skip, d)
1173 package_qa_walk(checks, package, d)
1174
1175 checks = parse_test_matrix("QAPKGTEST", skip, d)
1176 for func in checks:
1177 func(package, d)
1178
1179 package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
1180 package_qa_check_deps(package, pkgdest, d)
1181
1182 checks = parse_test_matrix("QARECIPETEST", skip, d)
1183 for func in checks:
1184 func(pn, d)
1185
1186 package_qa_check_libdir(d)
1187
1188 cpath = None
1189 oe.qa.exit_if_errors(d)
1190}
1191
1192# binutils is used for most checks, so it needs to be set as a dependency.
1193# POPULATESYSROOTDEPS is defined in staging class.
1194do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
1195do_package_qa[vardeps] = "${@bb.utils.contains('ERROR_QA', 'empty-dirs', 'QA_EMPTY_DIRS', '', d)}"
1196do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
1197do_package_qa[rdeptask] = "do_packagedata"
1198addtask do_package_qa after do_packagedata do_package before do_build
1199
1200do_build[rdeptask] += "do_package_qa"
1201
1202# Add the package specific INSANE_SKIPs to the sstate dependencies
1203python() {
1204 pkgs = (d.getVar('PACKAGES') or '').split()
1205 for pkg in pkgs:
1206 d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP:{}".format(pkg))
1207 funcs = d.getVarFlags("QAPATHTEST")
1208 funcs.update(d.getVarFlags("QAPKGTEST"))
1209 funcs.update(d.getVarFlags("QARECIPETEST"))
1210 d.appendVarFlag("do_package_qa", "vardeps", " ".join(funcs.values()))
1211}
1212
1213SSTATETASKS += "do_package_qa"
1214do_package_qa[sstate-inputdirs] = ""
1215do_package_qa[sstate-outputdirs] = ""
1216python do_package_qa_setscene () {
1217 sstate_setscene(d)
1218}
1219addtask do_package_qa_setscene
1220
1221python do_qa_sysroot() {
1222 bb.note("QA checking do_populate_sysroot")
1223 sysroot_destdir = d.expand('${SYSROOT_DESTDIR}')
1224 for sysroot_dir in d.expand('${SYSROOT_DIRS}').split():
1225 qa_check_staged(sysroot_destdir + sysroot_dir, d)
1226 oe.qa.exit_with_message_if_errors("do_populate_sysroot for this recipe installed files with QA issues", d)
1227}
1228do_populate_sysroot[postfuncs] += "do_qa_sysroot"
1229
1230python do_qa_patch() {
1231 import subprocess
1232
1233 ###########################################################################
1234 # Check patch.log for fuzz warnings
1235 #
1236 # Further information on why we check for patch fuzz warnings:
1237 # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
1238 # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
1239 ###########################################################################
1240
1241 logdir = d.getVar('T')
1242 patchlog = os.path.join(logdir,"log.do_patch")
1243
1244 if os.path.exists(patchlog):
1245 fuzzheader = '--- Patch fuzz start ---'
1246 fuzzfooter = '--- Patch fuzz end ---'
1247 statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
1248 if subprocess.call(statement, shell=True) == 0:
1249 msg = "Fuzz detected:\n\n"
1250 fuzzmsg = ""
1251 inFuzzInfo = False
1252 f = open(patchlog, "r")
1253 for line in f:
1254 if fuzzheader in line:
1255 inFuzzInfo = True
1256 fuzzmsg = ""
1257 elif fuzzfooter in line:
1258 fuzzmsg = fuzzmsg.replace('\n\n', '\n')
1259 msg += fuzzmsg
1260 msg += "\n"
1261 inFuzzInfo = False
1262 elif inFuzzInfo and not 'Now at patch' in line:
1263 fuzzmsg += line
1264 f.close()
1265 msg += "The context lines in the patches can be updated with devtool:\n"
1266 msg += "\n"
1267 msg += " devtool modify %s\n" % d.getVar('PN')
1268 msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
1269 msg += "Don't forget to review changes done by devtool!\n"
1270 msg += "\nPatch log indicates that patches do not apply cleanly."
1271 oe.qa.handle_error("patch-fuzz", msg, d)
1272
1273 # Check if the patch contains a correctly formatted and spelled Upstream-Status
1274 import re
1275 from oe import patch
1276
1277 for url in patch.src_patches(d):
1278 (_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
1279
1280 msg = oe.qa.check_upstream_status(fullpath)
1281 if msg:
1282 oe.qa.handle_error("patch-status", msg, d)
1283
1284 ###########################################################################
1285 # Check for missing ptests
1286 ###########################################################################
1287 def match_line_in_files(toplevel, filename_glob, line_regex):
1288 import pathlib
1289 try:
1290 toppath = pathlib.Path(toplevel)
1291 for entry in toppath.glob(filename_glob):
1292 try:
1293 with open(entry, 'r', encoding='utf-8', errors='ignore') as f:
1294 for line in f.readlines():
1295 if re.match(line_regex, line):
1296 return True
1297 except FileNotFoundError:
1298 # Broken symlink in source
1299 pass
1300 except FileNotFoundError:
1301 # pathlib.Path.glob() might throw this when file/directory
1302 # disappear while scanning.
1303 bb.note("unimplemented-ptest: FileNotFoundError exception while scanning (disappearing file while scanning?). Check was ignored." % d.getVar('PN'))
1304 pass
1305 return False
1306
1307 srcdir = d.getVar('S')
1308 if not bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
1309 pass
1310 elif not (bb.utils.contains('ERROR_QA', 'unimplemented-ptest', True, False, d) or bb.utils.contains('WARN_QA', 'unimplemented-ptest', True, False, d)):
1311 pass
1312 elif bb.data.inherits_class('ptest', d):
1313 bb.note("Package %s QA: skipping unimplemented-ptest: ptest implementation detected" % d.getVar('PN'))
1314
1315 # Detect perl Test:: based tests
1316 elif os.path.exists(os.path.join(srcdir, "t")) and any(filename.endswith('.t') for filename in os.listdir(os.path.join(srcdir, 't'))):
1317 oe.qa.handle_error("unimplemented-ptest", "%s: perl Test:: based tests detected" % d.getVar('PN'), d)
1318
1319 # Detect pytest-based tests
1320 elif match_line_in_files(srcdir, "**/*.py", r'\s*(?:import\s*pytest|from\s*pytest)'):
1321 oe.qa.handle_error("unimplemented-ptest", "%s: pytest-based tests detected" % d.getVar('PN'), d)
1322
1323 # Detect meson-based tests
1324 elif os.path.exists(os.path.join(srcdir, "meson.build")) and match_line_in_files(srcdir, "**/meson.build", r'\s*test\s*\('):
1325 oe.qa.handle_error("unimplemented-ptest", "%s: meson-based tests detected" % d.getVar('PN'), d)
1326
1327 # Detect cmake-based tests
1328 elif os.path.exists(os.path.join(srcdir, "CMakeLists.txt")) and match_line_in_files(srcdir, "**/CMakeLists.txt", r'\s*(?:add_test|enable_testing)\s*\('):
1329 oe.qa.handle_error("unimplemented-ptest", "%s: cmake-based tests detected" % d.getVar('PN'), d)
1330
1331 # Detect autotools-based tests
1332 elif os.path.exists(os.path.join(srcdir, "Makefile.in")) and (match_line_in_files(srcdir, "**/Makefile.in", r'\s*TESTS\s*\+?=') or match_line_in_files(srcdir, "**/*.at", r'.*AT_INIT')):
1333 oe.qa.handle_error("unimplemented-ptest", "%s: autotools-based tests detected" % d.getVar('PN'), d)
1334
1335 # Detect cargo-based tests
1336 elif os.path.exists(os.path.join(srcdir, "Cargo.toml")) and (
1337 match_line_in_files(srcdir, "**/*.rs", r'\s*#\s*\[\s*test\s*\]') or
1338 match_line_in_files(srcdir, "**/*.rs", r'\s*#\s*\[\s*cfg\s*\(\s*test\s*\)\s*\]')
1339 ):
1340 oe.qa.handle_error("unimplemented-ptest", "%s: cargo-based tests detected" % d.getVar('PN'), d)
1341
1342 # Last resort, detect a test directory in sources
1343 elif os.path.exists(srcdir) and any(filename.lower() in ["test", "tests"] for filename in os.listdir(srcdir)):
1344 oe.qa.handle_error("unimplemented-ptest", "%s: test subdirectory detected" % d.getVar('PN'), d)
1345
1346 oe.qa.exit_if_errors(d)
1347}
1348
1349python do_qa_configure() {
1350 import subprocess
1351
1352 ###########################################################################
1353 # Check config.log for cross compile issues
1354 ###########################################################################
1355
1356 configs = []
1357 workdir = d.getVar('WORKDIR')
1358
1359 skip = (d.getVar('INSANE_SKIP') or "").split()
1360 skip_configure_unsafe = False
1361 if 'configure-unsafe' in skip:
1362 bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
1363 skip_configure_unsafe = True
1364
1365 if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
1366 bb.note("Checking autotools environment for common misconfiguration")
1367 for root, dirs, files in os.walk(workdir):
1368 statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \
1369 os.path.join(root,"config.log")
1370 if "config.log" in files:
1371 if subprocess.call(statement, shell=True) == 0:
1372 error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
1373Rerun configure task after fixing this."""
1374 oe.qa.handle_error("configure-unsafe", error_msg, d)
1375
1376 if "configure.ac" in files:
1377 configs.append(os.path.join(root,"configure.ac"))
1378 if "configure.in" in files:
1379 configs.append(os.path.join(root, "configure.in"))
1380
1381 ###########################################################################
1382 # Check gettext configuration and dependencies are correct
1383 ###########################################################################
1384
1385 skip_configure_gettext = False
1386 if 'configure-gettext' in skip:
1387 bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
1388 skip_configure_gettext = True
1389
1390 cnf = d.getVar('EXTRA_OECONF') or ""
1391 if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
1392 "--disable-nls" in cnf or skip_configure_gettext):
1393 ml = d.getVar("MLPREFIX") or ""
1394 if bb.data.inherits_class('cross-canadian', d):
1395 gt = "nativesdk-gettext"
1396 else:
1397 gt = "gettext-native"
1398 deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
1399 if gt not in deps:
1400 for config in configs:
1401 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
1402 if subprocess.call(gnu, shell=True) == 0:
1403 error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
1404 oe.qa.handle_error("configure-gettext", error_msg, d)
1405
1406 ###########################################################################
1407 # Check for unrecognised configure options (with an ignore list)
1408 ###########################################################################
1409 if bb.data.inherits_class("autotools", d):
1410 bb.note("Checking configure output for unrecognised options")
1411 try:
1412 if bb.data.inherits_class("autotools", d):
1413 flag = "WARNING: unrecognized options:"
1414 log = os.path.join(d.getVar('B'), 'config.log')
1415 output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
1416 options = set()
1417 for line in output.splitlines():
1418 options |= set(line.partition(flag)[2].split())
1419 ignore_opts = set(d.getVar("UNKNOWN_CONFIGURE_OPT_IGNORE").split())
1420 options -= ignore_opts
1421 if options:
1422 pn = d.getVar('PN')
1423 error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
1424 oe.qa.handle_error("unknown-configure-option", error_msg, d)
1425 except subprocess.CalledProcessError:
1426 pass
1427
1428 oe.qa.exit_if_errors(d)
1429}
1430
1431python do_qa_unpack() {
1432 src_uri = d.getVar('SRC_URI')
1433 s_dir = d.getVar('S')
1434 s_dir_orig = d.getVar('S', False)
1435
1436 if s_dir_orig == '${WORKDIR}/git' or s_dir_orig == '${UNPACKDIR}/git':
1437 bb.fatal('Recipes that set S = "${WORKDIR}/git" or S = "${UNPACKDIR}/git" should remove that assignment, as the default S set by bitbake.conf in oe-core now works.')
1438
1439 if '${WORKDIR}' in s_dir_orig:
1440 bb.fatal('S should be set relative to UNPACKDIR, e.g. replace WORKDIR with UNPACKDIR in "S = {}"'.format(s_dir_orig))
1441
1442 if src_uri and not os.path.exists(s_dir):
1443 bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
1444}
1445
1446python do_recipe_qa() {
1447 import re
1448
1449 def test_naming(pn, d):
1450 if pn.endswith("-native") and not bb.data.inherits_class("native", d):
1451 oe.qa.handle_error("recipe-naming", "Recipe %s appears native but is not, should inherit native" % pn, d)
1452 if pn.startswith("nativesdk-") and not bb.data.inherits_class("nativesdk", d):
1453 oe.qa.handle_error("recipe-naming", "Recipe %s appears nativesdk but is not, should inherit nativesdk" % pn, d)
1454
1455 def test_missing_metadata(pn, d):
1456 fn = d.getVar("FILE")
1457 srcfile = d.getVar('SRC_URI').split()
1458 # Check that SUMMARY is not the same as the default from bitbake.conf
1459 if d.getVar('SUMMARY') == d.expand("${PN} version ${PV}-${PR}"):
1460 oe.qa.handle_error("missing-metadata", "Recipe {} in {} does not contain a SUMMARY. Please add an entry.".format(pn, fn), d)
1461 if not d.getVar('HOMEPAGE'):
1462 if (srcfile and srcfile[0].startswith('file')) or not d.getVar('SRC_URI'):
1463 # We are only interested in recipes whose SRC_URI is fetched from external sources
1464 pass
1465 else:
1466 oe.qa.handle_error("missing-metadata", "Recipe {} in {} does not contain a HOMEPAGE. Please add an entry.".format(pn, fn), d)
1467
1468 def test_missing_maintainer(pn, d):
1469 fn = d.getVar("FILE")
1470 if pn.endswith("-native") or pn.startswith("nativesdk-") or "packagegroup-" in pn or "core-image-ptest-" in pn:
1471 return
1472 if not d.getVar('RECIPE_MAINTAINER'):
1473 oe.qa.handle_error("missing-maintainer", "Recipe {} in {} does not have an assigned maintainer. Please add an entry into meta/conf/distro/include/maintainers.inc.".format(pn, fn), d)
1474
1475 def test_srcuri(pn, d):
1476 skip = (d.getVar('INSANE_SKIP') or "").split()
1477 if 'src-uri-bad' in skip:
1478 bb.note("Recipe %s skipping qa checking: src-uri-bad" % pn)
1479 return
1480
1481 if "${PN}" in d.getVar("SRC_URI", False):
1482 oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
1483
1484 for url in d.getVar("SRC_URI").split():
1485 # Search for github and gitlab URLs that pull unstable archives (comment for future greppers)
1486 if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url) or "//codeload.github.com/" in url:
1487 oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
1488
1489 def test_packageconfig(pn, d):
1490 pkgconfigs = (d.getVar("PACKAGECONFIG") or "").split()
1491 if pkgconfigs:
1492 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
1493 invalid_pkgconfigs = set(pkgconfigs) - set(pkgconfigflags)
1494 if invalid_pkgconfigs:
1495 error_msg = "%s: invalid PACKAGECONFIG(s): %s" % (pn, " ".join(sorted(invalid_pkgconfigs)))
1496 oe.qa.handle_error("invalid-packageconfig", error_msg, d)
1497
1498 pn = d.getVar('PN')
1499 test_naming(pn, d)
1500 test_missing_metadata(pn, d)
1501 test_missing_maintainer(pn, d)
1502 test_srcuri(pn, d)
1503 test_packageconfig(pn, d)
1504 oe.qa.exit_if_errors(d)
1505}
1506
1507addtask do_recipe_qa before do_fetch do_package_qa do_build
1508
1509SSTATETASKS += "do_recipe_qa"
1510do_recipe_qa[sstate-inputdirs] = ""
1511do_recipe_qa[sstate-outputdirs] = ""
1512python do_recipe_qa_setscene () {
1513 sstate_setscene(d)
1514}
1515addtask do_recipe_qa_setscene
1516
1517# Check for patch fuzz
1518do_patch[postfuncs] += "do_qa_patch "
1519
1520# Check broken config.log files, for packages requiring Gettext which
1521# don't have it in DEPENDS.
1522#addtask qa_configure after do_configure before do_compile
1523do_configure[postfuncs] += "do_qa_configure "
1524
1525# Check whether S exists.
1526do_unpack[postfuncs] += "do_qa_unpack"
1527
1528python () {
1529 import re
1530
1531 if bb.utils.contains('ERROR_QA', 'desktop', True, False, d) or bb.utils.contains('WARN_QA', 'desktop', True, False, d):
1532 d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
1533
1534 ###########################################################################
1535 # Check various variables
1536 ###########################################################################
1537
1538 # Checking ${FILESEXTRAPATHS}
1539 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
1540 if '__default' not in extrapaths.split(":"):
1541 msg = "FILESEXTRAPATHS-variable, must always use :prepend (or :append)\n"
1542 msg += "type of assignment, and don't forget the colon.\n"
1543 msg += "Please assign it with the format of:\n"
1544 msg += " FILESEXTRAPATHS:append := \":${THISDIR}/Your_Files_Path\" or\n"
1545 msg += " FILESEXTRAPATHS:prepend := \"${THISDIR}/Your_Files_Path:\"\n"
1546 msg += "in your bbappend file\n\n"
1547 msg += "Your incorrect assignment is:\n"
1548 msg += "%s\n" % extrapaths
1549 bb.warn(msg)
1550
1551 overrides = d.getVar('OVERRIDES').split(':')
1552 pn = d.getVar('PN')
1553 if pn in overrides:
1554 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
1555 oe.qa.handle_error("pn-overrides", msg, d)
1556 prog = re.compile(r'[A-Z]')
1557 if prog.search(pn):
1558 oe.qa.handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
1559
1560 sourcedir = d.getVar("S")
1561 builddir = d.getVar("B")
1562 workdir = d.getVar("WORKDIR")
1563 unpackdir = d.getVar("UNPACKDIR")
1564 if sourcedir == workdir:
1565 bb.fatal("Using S = ${WORKDIR} is no longer supported")
1566 if builddir == workdir:
1567 bb.fatal("Using B = ${WORKDIR} is no longer supported")
1568 if unpackdir == workdir:
1569 bb.fatal("Using UNPACKDIR = ${WORKDIR} is not supported")
1570 if sourcedir[-1] == '/':
1571 bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
1572 if builddir[-1] == '/':
1573 bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))
1574
1575 # Some people mistakenly use DEPENDS:${PN} instead of DEPENDS and wonder
1576 # why it doesn't work.
1577 if (d.getVar(d.expand('DEPENDS:${PN}'))):
1578 oe.qa.handle_error("pkgvarcheck", "recipe uses DEPENDS:${PN}, should use DEPENDS", d)
1579
1580 # virtual/ is meaningless for these variables
1581 for k in ['RDEPENDS', 'RPROVIDES']:
1582 for var in bb.utils.explode_deps(d.getVar(k + ':' + pn) or ""):
1583 if var.startswith("virtual/"):
1584 oe.qa.handle_error("virtual-slash", "%s is set to %s but the substring 'virtual/' holds no meaning in this context. It only works for build time dependencies, not runtime ones. It is suggested to use 'VIRTUAL-RUNTIME_' variables instead." % (k, var), d)
1585
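To illustrate the virtual-slash check: the first form below trips the QA error, the second is the suggested fix (the recipe context is hypothetical; VIRTUAL-RUNTIME_init_manager is a variable machine/distro configs commonly set):

    # Flagged: 'virtual/' only resolves for build-time dependencies
    RDEPENDS:${PN} += "virtual/kernel"

    # Suggested: use a VIRTUAL-RUNTIME_ variable instead
    RDEPENDS:${PN} += "${VIRTUAL-RUNTIME_init_manager}"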
1586 issues = []
1587 if (d.getVar('PACKAGES') or "").split():
1588 for dep in (d.getVar('QADEPENDS') or "").split():
1589 d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
1590 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
1591 if d.getVar(var, False):
1592 issues.append(var)
1593
1594 if bb.utils.contains('ERROR_QA', 'host-user-contaminated', True, False, d) or bb.utils.contains('WARN_QA', 'host-user-contaminated', True, False, d):
1595 d.setVarFlag('do_package_qa', 'fakeroot', '1')
1596 d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
1597 else:
1598 d.setVarFlag('do_package_qa', 'rdeptask', '')
1599 for i in issues:
1600 oe.qa.handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
1601
1602 if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
1603 for native_class in ['native', 'nativesdk']:
1604 if bb.data.inherits_class(native_class, d):
1605
1606 inherited_classes = d.getVar('__inherit_cache', False) or []
1607 needle = "/" + native_class
1608
1609 bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
1610 # BBCLASSEXTEND items are always added at the end
1611 skip_classes = bbclassextend
1612 if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
1613 # native also inherits nopackages and relocatable bbclasses
1614 skip_classes.extend(['nopackages', 'relocatable'])
1615
1616 broken_order = []
1617 for class_item in reversed(inherited_classes):
1618 if needle not in class_item:
1619 for extend_item in skip_classes:
1620 if '/%s.bbclass' % extend_item in class_item:
1621 break
1622 else:
1623 pn = d.getVar('PN')
1624 broken_order.append(os.path.basename(class_item))
1625 else:
1626 break
1627 if broken_order:
1628 oe.qa.handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
1629 "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
1630
1631 oe.qa.exit_if_errors(d)
1632}
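To make the native-last check concrete, a minimal sketch (the class names are illustrative):

    # Flagged: cmake is inherited after native
    inherit native
    inherit cmake

    # Accepted: native/nativesdk comes last
    inherit cmake
    inherit native

Classes pulled in via BBCLASSEXTEND (plus nopackages/relocatable for native) are exempted by the skip_classes handling above.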
diff --git a/meta/classes-global/license.bbclass b/meta/classes-global/license.bbclass
deleted file mode 100644
index af5f1ed41d..0000000000
--- a/meta/classes-global/license.bbclass
+++ /dev/null
@@ -1,266 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
8# LIC_FILES_CHKSUM.
9# TODO:
10# - There is a real issue revolving around license naming standards.
11
12LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
13LICSSTATEDIR = "${WORKDIR}/license-destdir/"
14
15# Create extra package with license texts and add it to RRECOMMENDS:${PN}
16LICENSE_CREATE_PACKAGE[type] = "boolean"
17LICENSE_CREATE_PACKAGE ??= "0"
18LICENSE_PACKAGE_SUFFIX ??= "-lic"
19LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
20
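Per the comment above, enabling the extra license package is a single switch, settable in local.conf or a distro config; the resulting ${PN}-lic package ships the license texts under ${datadir}/licenses:

    LICENSE_CREATE_PACKAGE = "1"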
21LICENSE_DEPLOY_PATHCOMPONENT = "${SSTATE_PKGARCH}"
22LICENSE_DEPLOY_PATHCOMPONENT:class-cross = "native"
23LICENSE_DEPLOY_PATHCOMPONENT:class-native = "native"
24# Ensure the *value* of SSTATE_PKGARCH is captured as it is used in the output paths
25LICENSE_DEPLOY_PATHCOMPONENT[vardepvalue] += "${LICENSE_DEPLOY_PATHCOMPONENT}"
26
27addtask populate_lic after do_patch before do_build
28do_populate_lic[dirs] = "${LICSSTATEDIR}/${LICENSE_DEPLOY_PATHCOMPONENT}/${PN}"
29do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
30
31python do_populate_lic() {
32 """
33 Populate LICENSE_DIRECTORY with licenses.
34 """
35 lic_files_paths = find_license_files(d)
36
37 # The base directory we wrangle licenses to
38 destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('LICENSE_DEPLOY_PATHCOMPONENT'), d.getVar('PN'))
39 copy_license_files(lic_files_paths, destdir)
40 info = get_recipe_info(d)
41 with open(os.path.join(destdir, "recipeinfo"), "w") as f:
42 for key in sorted(info.keys()):
43 f.write("%s: %s\n" % (key, info[key]))
44 oe.qa.exit_if_errors(d)
45}
46
47# It would be better to copy the files in do_install:append, but find_license_files is Python.
48python perform_packagecopy:prepend () {
49 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
50 if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
51 lic_files_paths = find_license_files(d)
52
53 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
54 destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
55 copy_license_files(lic_files_paths, destdir)
56 add_package_and_files(d)
57}
58perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
59
60def get_recipe_info(d):
61 info = {}
62 info["PV"] = d.getVar("PV")
63 info["PR"] = d.getVar("PR")
64 info["LICENSE"] = d.getVar("LICENSE")
65 return info
66
67def add_package_and_files(d):
68 packages = d.getVar('PACKAGES')
69 files = d.getVar('LICENSE_FILES_DIRECTORY')
70 pn = d.getVar('PN')
71 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
72 if pn_lic in packages.split():
73 bb.warn("%s package already existed in %s." % (pn_lic, pn))
74 else:
75 # Put it first in PACKAGES to be sure that nothing else claims LICENSE_FILES_DIRECTORY
76 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
77 d.setVar('FILES:' + pn_lic, files)
78
79def copy_license_files(lic_files_paths, destdir):
80 import shutil
81 import errno
82
83 bb.utils.mkdirhier(destdir)
84 for (basename, path, beginline, endline) in lic_files_paths:
85 try:
86 src = path
87 dst = os.path.join(destdir, basename)
88 if os.path.exists(dst):
89 os.remove(dst)
90 if os.path.islink(src):
91 src = os.path.realpath(src)
92 canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
93 if canlink:
94 try:
95 os.link(src, dst)
96 except OSError as err:
97 if err.errno == errno.EXDEV:
98 # Copy license files if hardlink is not possible even if st_dev is the
99 # same on source and destination (docker container with device-mapper?)
100 canlink = False
101 else:
102 raise
103 # Only chown if we did hardlink and we're running under pseudo
104 if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
105 os.chown(dst,0,0)
106 if not canlink:
107 begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
108 end_idx = max(0, int(endline)) if endline is not None else None
109 if begin_idx is None and end_idx is None:
110 shutil.copyfile(src, dst)
111 else:
112 with open(src, 'rb') as src_f:
113 with open(dst, 'wb') as dst_f:
114 dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
115
116 except Exception as e:
117 bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
118
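The beginline/endline slicing above is driven from LIC_FILES_CHKSUM in the recipe. A hedged sketch, with a hypothetical source file and a placeholder checksum:

    # License text embedded in a source file header (lines 4-18)
    LIC_FILES_CHKSUM = "file://src/main.c;beginline=4;endline=18;md5=<md5-of-those-lines>"

When begin/end markers are present, canlink stays False and only the selected lines are copied, rather than the whole file being hardlinked.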
119def find_license_files(d):
120 """
121 Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
122 """
123 import shutil
124 import oe.license
125 from collections import defaultdict, OrderedDict
126
127 # All the license files for the package
128 lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
129 pn = d.getVar('PN')
130 # The license files are located in S, as listed in LIC_FILES_CHKSUM.
131 srcdir = d.getVar('S')
132 # Directory we store the generic licenses as set in the distro configuration
133 generic_directory = d.getVar('COMMON_LICENSE_DIR')
134 # List of basename, path tuples
135 lic_files_paths = []
136 # dict to keep track of non-generic license mappings
137 non_generic_lics = {}
138 # Entries from LIC_FILES_CHKSUM
139 lic_chksums = {}
140 license_source_dirs = []
141 license_source_dirs.append(generic_directory)
142 try:
143 additional_lic_dirs = d.getVar('LICENSE_PATH').split()
144 for lic_dir in additional_lic_dirs:
145 license_source_dirs.append(lic_dir)
146 except:
147 pass
148
149 class FindVisitor(oe.license.LicenseVisitor):
150 def visit_Str(self, node):
151 #
152 # Until we figure out what to do with
153 # the two modifiers we support ("or greater" = +
154 # and "with exceptions" = *),
155 # we just strip out the modifier and use
156 # the base license.
157 find_licenses(node.s.replace("+", "").replace("*", ""))
158 self.generic_visit(node)
159
160 def visit_Constant(self, node):
161 find_licenses(node.value.replace("+", "").replace("*", ""))
162 self.generic_visit(node)
163
164 def find_licenses(license_type):
165 try:
166 bb.utils.mkdirhier(gen_lic_dest)
167 except:
168 pass
169 spdx_generic = None
170 license_source = None
171 # If the generic does not exist we need to check to see if there is an SPDX mapping to it,
172 # unless NO_GENERIC_LICENSE is set.
173 for lic_dir in license_source_dirs:
174 if not os.path.isfile(os.path.join(lic_dir, license_type)):
175 if d.getVarFlag('SPDXLICENSEMAP', license_type) is not None:
176 # Great, there is an SPDXLICENSEMAP. We can copy!
177 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
178 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
179 license_source = lic_dir
180 break
181 elif os.path.isfile(os.path.join(lic_dir, license_type)):
182 spdx_generic = license_type
183 license_source = lic_dir
184 break
185
186 non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
187 if spdx_generic and license_source:
188 # We really should copy to generic_ + spdx_generic; however, that ends up messing up
189 # the manifest audit. This should be fixed in emit_pkgdata (or we should actually go and fix all the recipes).
190
191 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
192 None, None))
193
194 # The user may attempt to use NO_GENERIC_LICENSE for a generic license, which doesn't
195 # make sense and should not be allowed; warn the user in this case.
196 if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
197 oe.qa.handle_error("license-no-generic",
198 "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
199
200 elif non_generic_lic and non_generic_lic in lic_chksums:
201 # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
202 # of the package rather than the license_source_dirs.
203 lic_files_paths.append(("generic_" + license_type,
204 os.path.join(srcdir, non_generic_lic), None, None))
205 non_generic_lics[non_generic_lic] = license_type
206 else:
207 # Explicitly avoid the CLOSED license because this isn't generic
208 if license_type != 'CLOSED':
209 # And here is where we warn people that their licenses are lousy
210 oe.qa.handle_error("license-exists",
211 "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
212 pass
213
214 if not generic_directory:
215 bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
216
217 for url in lic_files.split():
218 try:
219 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
220 if method != "file" or not path:
221 raise bb.fetch.MalformedUrl()
222 except bb.fetch.MalformedUrl:
223 bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
224 # We want the license filename and path
225 chksum = parm.get('md5', None)
226 beginline = parm.get('beginline')
227 endline = parm.get('endline')
228 lic_chksums[path] = (chksum, beginline, endline)
229
230 v = FindVisitor()
231 try:
232 v.visit_string(d.getVar('LICENSE'))
233 except oe.license.InvalidLicense as exc:
234 bb.fatal('%s: %s' % (d.getVar('PF'), exc))
235 except SyntaxError:
236 oe.qa.handle_error("license-syntax",
237 "%s: Failed to parse LICENSE: %s" % (d.getVar('PF'), d.getVar('LICENSE')), d)
238 # Add files from LIC_FILES_CHKSUM to list of license files
239 lic_chksum_paths = defaultdict(OrderedDict)
240 for path, data in sorted(lic_chksums.items()):
241 lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
242 for basename, files in lic_chksum_paths.items():
243 if len(files) == 1:
244 # Don't copy again a LICENSE already handled as non-generic
245 if basename in non_generic_lics:
246 continue
247 data = list(files.values())[0]
248 lic_files_paths.append(tuple([basename] + list(data)))
249 else:
250 # If there are multiple different license files with identical
251 # basenames we rename them to <file>.0, <file>.1, ...
252 for i, data in enumerate(files.values()):
253 lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
254
255 return lic_files_paths
256
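For reference, the NO_GENERIC_LICENSE path above corresponds to a recipe fragment along these lines (the license name and file are illustrative, in the style of linux-firmware):

    LICENSE = "Firmware-Vendor"
    NO_GENERIC_LICENSE[Firmware-Vendor] = "LICENCE.vendor"
    LIC_FILES_CHKSUM = "file://LICENCE.vendor;md5=<checksum>"

The license file is then taken from ${S} rather than from COMMON_LICENSE_DIR or LICENSE_PATH.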
257SSTATETASKS += "do_populate_lic"
258do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
259do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
260
261IMAGE_CLASSES:append = " license_image"
262
263python do_populate_lic_setscene () {
264 sstate_setscene(d)
265}
266addtask do_populate_lic_setscene
diff --git a/meta/classes-global/logging.bbclass b/meta/classes-global/logging.bbclass
deleted file mode 100644
index 136f1e1733..0000000000
--- a/meta/classes-global/logging.bbclass
+++ /dev/null
@@ -1,117 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The following logging mechanisms are to be used in bash functions of recipes.
8# They are intended to map one-to-one, in intent and output format, to the
9# Python recipe logging functions of similar naming: bb.plain(),
10# bb.note(), etc.
11
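A short usage sketch in a hypothetical recipe task, showing how these map onto their bb.* counterparts:

    do_install:append() {
        bbnote "Installing optional configuration"      # cf. bb.note()
        if [ ! -e "${WORKDIR}/extra.conf" ]; then
            bbwarn "extra.conf not found, skipping"     # cf. bb.warn()
        else
            install -d ${D}${sysconfdir}
            install -m 0644 ${WORKDIR}/extra.conf ${D}${sysconfdir}/extra.conf
        fi
    }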
12LOGFIFO = "${T}/fifo.${@os.getpid()}"
13
14# Print the output exactly as it is passed in. Typically used for output of
15# tasks that should be seen on the console. Use sparingly.
16# Output: logs console
17bbplain() {
18 if [ -p ${LOGFIFO} ] ; then
19 printf "%b\0" "bbplain $*" > ${LOGFIFO}
20 else
21 echo "$*"
22 fi
23}
24
25# Notify the user of a noteworthy condition.
26# Output: logs
27bbnote() {
28 if [ -p ${LOGFIFO} ] ; then
29 printf "%b\0" "bbnote $*" > ${LOGFIFO}
30 else
31 echo "NOTE: $*"
32 fi
33}
34
35# Notify the user of a noteworthy condition, echoed to the console as well.
36# Output: logs console
37bbverbnote() {
38 if [ -p ${LOGFIFO} ]; then
39 printf "%b\0" "bbverbnote $*" > ${LOGFIFO}
40 else
41 echo "NOTE: $*"
42 fi
43}
44
45# Print a warning to the log. Warnings are non-fatal, and do not
46# indicate a build failure.
47# Output: logs console
48bbwarn() {
49 if [ -p ${LOGFIFO} ] ; then
50 printf "%b\0" "bbwarn $*" > ${LOGFIFO}
51 else
52 echo "WARNING: $*"
53 fi
54}
55
56# Print an error to the log. Errors are non-fatal in that the build can
57# continue, but they do indicate a build failure.
58# Output: logs console
59bberror() {
60 if [ -p ${LOGFIFO} ] ; then
61 printf "%b\0" "bberror $*" > ${LOGFIFO}
62 else
63 echo "ERROR: $*"
64 fi
65}
66
67# Print a fatal error to the log. Fatal errors indicate build failure
68# and halt the build, exiting with an error code.
69# Output: logs console
70bbfatal() {
71 if [ -p ${LOGFIFO} ] ; then
72 printf "%b\0" "bbfatal $*" > ${LOGFIFO}
73 else
74 echo "ERROR: $*"
75 fi
76 exit 1
77}
78
79# Like bbfatal, except prevents the suppression of the error log by
80# bitbake's UI.
81# Output: logs console
82bbfatal_log() {
83 if [ -p ${LOGFIFO} ] ; then
84 printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
85 else
86 echo "ERROR: $*"
87 fi
88 exit 1
89}
90
91# Print debug messages. These are appropriate for progress checkpoint
92# messages to the logs. Depending on the debug log level, they may also
93# go to the console.
94# Output: logs console
95# Usage: bbdebug 1 "first level debug message"
96# bbdebug 2 "second level debug message"
97bbdebug() {
98 USAGE='Usage: bbdebug [123] "message"'
99 if [ $# -lt 2 ]; then
100 bbfatal "$USAGE"
101 fi
102
103 # Strip off the debug level and ensure it is an integer
104 DBGLVL=$1; shift
105 NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
106 if [ "$NONDIGITS" ]; then
107 bbfatal "$USAGE"
108 fi
109
110 # All debug output is printed to the logs
111 if [ -p ${LOGFIFO} ] ; then
112 printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
113 else
114 echo "DEBUG: $*"
115 fi
116}
117
diff --git a/meta/classes-global/mirrors.bbclass b/meta/classes-global/mirrors.bbclass
deleted file mode 100644
index 75a0003ce5..0000000000
--- a/meta/classes-global/mirrors.bbclass
+++ /dev/null
@@ -1,88 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7MIRRORS += "\
8${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
9${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20250101T023759Z/pool \
10${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
11${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
12${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
13${GNU_MIRROR} https://mirrors.edge.kernel.org/gnu \
14${KERNELORG_MIRROR} http://www.edge.kernel.org/pub \
15${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
16${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
17${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
18ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
19ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
20ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
21ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
22http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
23http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
24${APACHE_MIRROR} http://www.us.apache.org/dist \
25${APACHE_MIRROR} http://archive.apache.org/dist \
26http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
27${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
28${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
29ftp://sourceware.org/pub http://mirrors.edge.kernel.org/sourceware \
30ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
31ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
32cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
33svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
34git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
35gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
36hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
37bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
38p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
39osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
40https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
41ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
42npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
43cvs://.*/.* http://sources.openembedded.org/ \
44svn://.*/.* http://sources.openembedded.org/ \
45git://.*/.* http://sources.openembedded.org/ \
46gitsm://.*/.* http://sources.openembedded.org/ \
47hg://.*/.* http://sources.openembedded.org/ \
48bzr://.*/.* http://sources.openembedded.org/ \
49p4://.*/.* http://sources.openembedded.org/ \
50osc://.*/.* http://sources.openembedded.org/ \
51https?://.*/.* http://sources.openembedded.org/ \
52ftp://.*/.* http://sources.openembedded.org/ \
53npm://.*/?.* http://sources.openembedded.org/ \
54${CPAN_MIRROR} https://cpan.metacpan.org/ \
55https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.edge.kernel.org/yocto/uninative/ \
56https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.edge.kernel.org/yocto-sources/ \
57"
58
59# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
60# where git native protocol fetches may fail due to local firewall rules, etc.
61
62MIRRORS += "\
63git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
64git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
65git://git.infradead.org/.* git://git.infraroot.at/PATH;protocol=https \
66git://.*/.* git://HOST/PATH;protocol=https \
67git://.*/.* git://HOST/git/PATH;protocol=https \
68"
69
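As an example of how the HOST/PATH placeholders are substituted: a failing fetch of

    git://sourceware.org/git/glibc.git

would typically be retried over https as

    git://sourceware.org/git/glibc.git;protocol=https

via the git://.*/.* git://HOST/PATH;protocol=https rule above.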
70# Switch llvm, glibc and binutils recipes to use shallow clones as they're large and this
71# improves user experience whilst allowing the flexibility of git urls in the recipes
72BB_GIT_SHALLOW:pn-binutils = "1"
73BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
74BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
75BB_GIT_SHALLOW:pn-binutils-testsuite = "1"
76BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
77BB_GIT_SHALLOW:pn-binutils-native = "1"
78BB_GIT_SHALLOW:pn-nativesdk-binutils = "1"
79
80BB_GIT_SHALLOW:pn-cross-localedef-native = "1"
81BB_GIT_SHALLOW:pn-glibc = "1"
82BB_GIT_SHALLOW:pn-glibc-tests = "1"
83PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
84 git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"
85
86BB_GIT_SHALLOW:pn-llvm = "1"
87BB_GIT_SHALLOW:pn-llvm-native = "1"
88BB_GIT_SHALLOW:pn-nativesdk-llvm = "1"
diff --git a/meta/classes-global/package.bbclass b/meta/classes-global/package.bbclass
deleted file mode 100644
index d7ff333f20..0000000000
--- a/meta/classes-global/package.bbclass
+++ /dev/null
@@ -1,611 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Packaging process
9#
10# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
11# taking D and splitting it up into the packages listed in PACKAGES, placing the
12# resulting output in PKGDEST.
13#
14# There are the following default steps but PACKAGEFUNCS can be extended:
15#
16# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
17#
18# b) perform_packagecopy - Copy D into PKGD
19#
20# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
21#
22# d) split_and_strip_files - split the files into runtime and debug and strip them.
23# Debug files include debug info split, and associated sources that end up in -dbg packages
24#
25# e) fixup_perms - Fix up permissions in the package before we split it.
26#
27# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
28# Also triggers the binary stripping code to put files in -dbg packages.
29#
30# g) package_do_filedeps - Collect per-file run-time dependency metadata
31# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
32# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
33#
34# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
35# dependencies found. Also stores the package name so anyone else using this library
36# knows which package to depend on.
37#
38# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
39#
40# j) read_shlibdeps - Reads the stored shlibs information into the metadata
41#
42# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
43#
44# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
45# packaging steps
46
47inherit packagedata
48inherit chrpath
49inherit package_pkgdata
50inherit insane
51
52PKGD = "${WORKDIR}/package"
53PKGDEST = "${WORKDIR}/packages-split"
54
55LOCALE_SECTION ?= ''
56
57ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
58
59# rpm is used for the per-file dependency identification
60# dwarfsrcfiles is used to determine the list of debug source files
61PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
62
63# If your postinstall can execute at rootfs creation time rather than on
64# target but depends on a native/cross tool in order to execute, you need to
65# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
66# in the package dependencies as normal; this is just for native/cross support
67# tools at rootfs build time.
68PACKAGE_WRITE_DEPS ??= ""
69
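A typical use, per the comment above, is a postinstall that runs under user-mode emulation at rootfs creation time; the recipe then declares:

    PACKAGE_WRITE_DEPS += "qemu-native"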
70def legitimize_package_name(s):
71 return oe.package.legitimize_package_name(s)
72
73def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
74 """
75 Used in .bb files to split up dynamically generated subpackages of a
76 given package, usually plugins or modules.
77
78 Arguments:
79 root -- the path in which to search
80 file_regex -- regular expression to match searched files. Use
81 parentheses () to mark the part of this expression
82 that should be used to derive the module name (to be
83 substituted where %s is used in other function
84 arguments as noted below)
85 output_pattern -- pattern to use for the package names. Must include %s.
86 description -- description to set for each package. Must include %s.
87 postinst -- postinstall script to use for all packages (as a
88 string)
89 recursive -- True to perform a recursive search - default False
90 hook -- a hook function to be called for every match. The
91 function will be called with the following arguments
92 (in the order listed):
93 f: full path to the file/directory match
94 pkg: the package name
95 file_regex: as above
96 output_pattern: as above
97 modulename: the module name derived using file_regex
98 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
99 all packages. The default value of None causes a
100 dependency on the main package (${PN}) - if you do
101 not want this, pass '' for this parameter.
102 aux_files_pattern -- extra item(s) to be added to FILES for each
103 package. Can be a single string item or a list of
104 strings for multiple items. Must include %s.
105 postrm -- postrm script to use for all packages (as a string)
106 allow_dirs -- True allow directories to be matched - default False
107 prepend -- if True, prepend created packages to PACKAGES instead
108 of the default False which appends them
109 match_path -- match file_regex on the whole relative path to the
110 root rather than just the file name
111 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
112 each package, using the actual derived module name
113 rather than converting it to something legal for a
114 package name. Can be a single string item or a list
115 of strings for multiple items. Must include %s.
116 allow_links -- True to allow symlinks to be matched - default False
117 summary -- Summary to set for each package. Must include %s;
118 defaults to description if not set.
119
120 """
121
122 dvar = d.getVar('PKGD')
123 root = d.expand(root)
124 output_pattern = d.expand(output_pattern)
125 extra_depends = d.expand(extra_depends)
126
127 # If the root directory doesn't exist, don't error out later but silently do
128 # no splitting.
129 if not os.path.exists(dvar + root):
130 return []
131
132 ml = d.getVar("MLPREFIX")
133 if ml:
134 if not output_pattern.startswith(ml):
135 output_pattern = ml + output_pattern
136
137 newdeps = []
138 for dep in (extra_depends or "").split():
139 if dep.startswith(ml):
140 newdeps.append(dep)
141 else:
142 newdeps.append(ml + dep)
143 if newdeps:
144 extra_depends = " ".join(newdeps)
145
146
147 packages = d.getVar('PACKAGES').split()
148 split_packages = set()
149
150 if postinst:
151 postinst = '#!/bin/sh\n' + postinst + '\n'
152 if postrm:
153 postrm = '#!/bin/sh\n' + postrm + '\n'
154 if not recursive:
155 objs = os.listdir(dvar + root)
156 else:
157 objs = []
158 for walkroot, dirs, files in os.walk(dvar + root):
159 for file in files:
160 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
161 if relpath:
162 objs.append(relpath)
163
164 if extra_depends is None:
165 extra_depends = d.getVar("PN")
166
167 if not summary:
168 summary = description
169
170 for o in sorted(objs):
171 import re, stat
172 if match_path:
173 m = re.match(file_regex, o)
174 else:
175 m = re.match(file_regex, os.path.basename(o))
176
177 if not m:
178 continue
179 f = os.path.join(dvar + root, o)
180 mode = os.lstat(f).st_mode
181 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
182 continue
183 on = oe.package.legitimize_package_name(m.group(1))
184 pkg = output_pattern % on
185 split_packages.add(pkg)
186 if not pkg in packages:
187 if prepend:
188 packages = [pkg] + packages
189 else:
190 packages.append(pkg)
191 oldfiles = d.getVar('FILES:' + pkg)
192 newfile = os.path.join(root, o)
193 # These names will be passed through glob() so if the filename actually
194 # contains * or ? (rare, but possible) we need to handle that specially
195 newfile = newfile.replace('*', '[*]')
196 newfile = newfile.replace('?', '[?]')
197 if not oldfiles:
198 the_files = [newfile]
199 if aux_files_pattern:
200 if type(aux_files_pattern) is list:
201 for fp in aux_files_pattern:
202 the_files.append(fp % on)
203 else:
204 the_files.append(aux_files_pattern % on)
205 if aux_files_pattern_verbatim:
206 if type(aux_files_pattern_verbatim) is list:
207 for fp in aux_files_pattern_verbatim:
208 the_files.append(fp % m.group(1))
209 else:
210 the_files.append(aux_files_pattern_verbatim % m.group(1))
211 d.setVar('FILES:' + pkg, " ".join(the_files))
212 else:
213 d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
214 if extra_depends != '':
215 d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
216 if not d.getVar('DESCRIPTION:' + pkg):
217 d.setVar('DESCRIPTION:' + pkg, description % on)
218 if not d.getVar('SUMMARY:' + pkg):
219 d.setVar('SUMMARY:' + pkg, summary % on)
220 if postinst:
221 d.setVar('pkg_postinst:' + pkg, postinst)
222 if postrm:
223 d.setVar('pkg_postrm:' + pkg, postrm)
224 if callable(hook):
225 hook(f, pkg, file_regex, output_pattern, m.group(1))
226
227 d.setVar('PACKAGES', ' '.join(packages))
228 return list(split_packages)
229
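A sketch of a typical call, splitting per-plugin packages out of a hypothetical plugin directory:

    python populate_packages:prepend () {
        plugindir = d.expand('${libdir}/myapp/plugins')
        do_split_packages(d, plugindir, r'^libplugin-(.*)\.so$',
                          output_pattern='myapp-plugin-%s',
                          description='MyApp plugin for %s',
                          extra_depends='')
    }

Each matching libplugin-foo.so then lands in its own myapp-plugin-foo package.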
230PACKAGE_DEPENDS += "file-native"
231
232python () {
233 if d.getVar('PACKAGES') != '':
234 deps = ""
235 for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
236 deps += " %s:do_populate_sysroot" % dep
237 if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
238 deps += ' xz-native:do_populate_sysroot'
239 d.appendVarFlag('do_package', 'depends', deps)
240
241 # shlibs requires any DEPENDS to have already packaged for the *.list files
242 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
243}
244
245
246PRSERV_ACTIVE = "${@bool(d.getVar("PRSERV_HOST"))}"
247PRSERV_ACTIVE[vardepvalue] = "${PRSERV_ACTIVE}"
248package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
249package_get_auto_pr[vardeps] += "PRSERV_ACTIVE"
250python package_get_auto_pr() {
251 import oe.prservice
252
253 def get_do_package_hash(pn):
254 if d.getVar("BB_RUNTASK") != "do_package":
255 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
256 for dep in taskdepdata:
257 if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
258 return taskdepdata[dep][6]
259 return None
260
261 # Support per recipe PRSERV_HOST
262 pn = d.getVar('PN')
263 host = d.getVar("PRSERV_HOST_" + pn)
264 if not (host is None):
265 d.setVar("PRSERV_HOST", host)
266
267 pkgv = d.getVar("PKGV")
268
269 # PR Server not active, handle AUTOINC
270 if not d.getVar('PRSERV_HOST'):
271 d.setVar("PRSERV_PV_AUTOINC", "0")
272 return
273
274 auto_pr = None
275 pv = d.getVar("PV")
276 version = d.getVar("PRAUTOINX")
277 pkgarch = d.getVar("PACKAGE_ARCH")
278 checksum = get_do_package_hash(pn)
279
280 # If do_package isn't in the dependencies, we can't get the checksum...
281 if not checksum:
282 bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
283 #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
284 #for dep in taskdepdata:
285 # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
286 return
287
288 if d.getVar('PRSERV_LOCKDOWN'):
289 auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
290 if auto_pr is None:
291 bb.fatal("Can NOT get PRAUTO from lockdown exported file")
292 d.setVar('PRAUTO',str(auto_pr))
293 return
294
295 try:
296 conn = oe.prservice.prserv_make_conn(d)
297 if conn is not None:
298 if "AUTOINC" in pkgv:
299 srcpv = bb.fetch2.get_srcrev(d)
300 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
301 value = conn.getPR(base_ver, pkgarch, srcpv)
302 d.setVar("PRSERV_PV_AUTOINC", str(value))
303
304 auto_pr = conn.getPR(version, pkgarch, checksum)
305 conn.close()
306 except Exception as e:
307 bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
308 if auto_pr is None:
309 bb.fatal("Can NOT get PRAUTO from remote PR service")
310 d.setVar('PRAUTO',str(auto_pr))
311}
312
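For local testing, the PR service consumed above can be started automatically by bitbake with one local.conf line ("localhost:0" requests an ephemeral local server):

    PRSERV_HOST = "localhost:0"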
313#
314# Package functions suitable for inclusion in PACKAGEFUNCS
315#
316
317python package_setup_pkgv() {
318 pkgv = d.getVar("PKGV")
319 # Expand SRCPV into PKGV if not present
320 srcpv = bb.fetch.get_pkgv_string(d)
321 if srcpv and "+" in pkgv:
322 d.appendVar("PKGV", srcpv)
323 pkgv = d.getVar("PKGV")
324
325 # Adjust pkgv as necessary...
326 if 'AUTOINC' in pkgv:
327 d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
328}
329
330
331python package_convert_pr_autoinc() {
332 # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
333 d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
334 d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
335}
336
337LOCALEBASEPN ??= "${PN}"
338LOCALE_PATHS ?= "${datadir}/locale"
339
340python package_do_split_locales() {
341 oe.package.split_locales(d)
342}
343
344python perform_packagecopy () {
345 import subprocess
346 import shutil
347
348 dest = d.getVar('D')
349 dvar = d.getVar('PKGD')
350
351 # Start package population by taking a copy of the installed
352 # files to operate on
353 # Preserve sparse files and hard links
354 cmd = 'tar --exclude=./sysroot-only -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
355 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
356
357 # replace RPATHs for the nativesdk binaries, to make them relocatable
358 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
359 rpath_replace (dvar, d)
360}
361perform_packagecopy[cleandirs] = "${PKGD}"
362perform_packagecopy[dirs] = "${PKGD}"
363
364python populate_packages () {
365 oe.package.populate_packages(d)
366}
367populate_packages[dirs] = "${D}"
368
369python package_fixsymlinks () {
370 oe.package.process_fixsymlinks(pkgfiles, d)
371}
372
373python package_package_name_hook() {
374 """
375 A package_name_hook function can be used to rewrite the package names by
376 changing PKG. For an example, see debian.bbclass.
377 """
378 pass
379}
380
381EXPORT_FUNCTIONS package_name_hook
382
383
384PKGDESTWORK = "${WORKDIR}/pkgdata"
385
386PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
387
388python emit_pkgdata() {
389 import oe.packagedata
390 oe.packagedata.emit_pkgdata(pkgfiles, d)
391}
392emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
393
394ldconfig_postinst_fragment() {
395if [ x"$D" = "x" ]; then
396 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
397fi
398}
399
400RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
401
402python package_do_filedeps() {
403 oe.package.process_filedeps(pkgfiles, d)
404}
405
406SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
407SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
408
409python package_do_shlibs() {
410 oe.package.process_shlibs(pkgfiles, d)
411}
412
413python package_do_pkgconfig () {
414 oe.package.process_pkgconfig(pkgfiles, d)
415}
416
417python read_shlibdeps () {
418 pkglibdeps = oe.package.read_libdep_files(d)
419
420 packages = d.getVar('PACKAGES').split()
421 for pkg in packages:
422 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
423 for dep in sorted(pkglibdeps[pkg]):
424 # Add the dep if it's not already there, or if no comparison is set
425 if dep not in rdepends:
426 rdepends[dep] = []
427 for v in pkglibdeps[pkg][dep]:
428 if v not in rdepends[dep]:
429 rdepends[dep].append(v)
430 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
431}
432
433python package_depchains() {
434 oe.package.process_depchains(pkgfiles, d)
435}
436
437# Since bitbake can't determine which variables are accessed during package
438# iteration, we need to list them here:
439PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
440
441def gen_packagevar(d, pkgvars="PACKAGEVARS"):
442 ret = []
443 pkgs = (d.getVar("PACKAGES") or "").split()
444 vars = (d.getVar(pkgvars) or "").split()
445 for v in vars:
446 ret.append(v)
447 for p in pkgs:
448 for v in vars:
449 ret.append(v + ":" + p)
450 return " ".join(ret)
451
452
453# Functions for setting up PKGD
454PACKAGE_PREPROCESS_FUNCS ?= ""
455# Functions which split PKGD up into separate packages
456PACKAGESPLITFUNCS ?= " \
457 package_do_split_locales \
458 populate_packages"
459# Functions which process metadata based on split packages
460PACKAGEFUNCS += " \
461 package_fixsymlinks \
462 package_name_hook \
463 package_do_filedeps \
464 package_do_shlibs \
465 package_do_pkgconfig \
466 read_shlibdeps \
467 package_depchains \
468 emit_pkgdata"
469
470python do_package () {
471 # Change the following version to cause sstate to invalidate the package
472 # cache. This is useful if an item this class depends on changes in a
473 # way that the output of this class changes. rpmdeps is a good example
474 # as any change to rpmdeps requires this to be rerun.
475 # PACKAGE_BBCLASS_VERSION = "6"
476
477 # Init cachedpath
478 global cpath
479 cpath = oe.cachedpath.CachedPath()
480
481 ###########################################################################
482 # Sanity test the setup
483 ###########################################################################
484
485 packages = (d.getVar('PACKAGES') or "").split()
486 if len(packages) < 1:
487 bb.debug(1, "No packages to build, skipping do_package")
488 return
489
490 workdir = d.getVar('WORKDIR')
491 outdir = d.getVar('DEPLOY_DIR')
492 dest = d.getVar('D')
493 dvar = d.getVar('PKGD')
494 pn = d.getVar('PN')
495
496 if not workdir or not outdir or not dest or not dvar or not pn:
497 bb.fatal("WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package")
498 return
499
500 bb.build.exec_func("package_setup_pkgv", d)
501 bb.build.exec_func("package_convert_pr_autoinc", d)
502
503 # Check for conflict between renamed packages and existing ones
504 # for each package in PACKAGES, check if it will be renamed to an existing one
505 for p in packages:
506 rename = d.getVar('PKG:%s' % p)
507 if rename and rename in packages:
508 bb.fatal('package "%s" is renamed to "%s" using PKG:%s, but package name already exists' % (p, rename, p))
509
510 ###########################################################################
511 # Optimisations
512 ###########################################################################
513
514 # Continually expanding complex expressions is inefficient, particularly
515 # when we write to the datastore and invalidate the expansion cache. This
516 # code pre-expands some frequently used variables
517
518 def expandVar(x, d):
519 d.setVar(x, d.getVar(x))
520
521 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
522 expandVar(x, d)
523
524 ###########################################################################
525 # Setup PKGD (from D)
526 ###########################################################################
527
528 bb.build.exec_func("package_prepare_pkgdata", d)
529 bb.build.exec_func("perform_packagecopy", d)
530 for f in (d.getVar('PACKAGE_PREPROCESS_FUNCS') or '').split():
531 bb.build.exec_func(f, d)
532 oe.package.process_split_and_strip_files(d)
533 oe.package.fixup_perms(d)
534
535 ###########################################################################
536 # Split up PKGD into PKGDEST
537 ###########################################################################
538
539 cpath = oe.cachedpath.CachedPath()
540
541 for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
542 bb.build.exec_func(f, d)
543
544 ###########################################################################
545 # Process PKGDEST
546 ###########################################################################
547
548 # Build global list of files in each split package
549 global pkgfiles
550 pkgfiles = {}
551 packages = d.getVar('PACKAGES').split()
552 pkgdest = d.getVar('PKGDEST')
553 for pkg in packages:
554 pkgfiles[pkg] = []
555 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
556 for file in files:
557 pkgfiles[pkg].append(walkroot + os.sep + file)
558
559 for f in (d.getVar('PACKAGEFUNCS') or '').split():
560 bb.build.exec_func(f, d)
561
562 oe.qa.exit_if_errors(d)
563}
564
565do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
566do_package[vardeps] += "${PACKAGE_PREPROCESS_FUNCS} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
567addtask package after do_install
568
569SSTATETASKS += "do_package"
570do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
571do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
572do_package_setscene[dirs] = "${STAGING_DIR}"
573
574python do_package_setscene () {
575 sstate_setscene(d)
576}
577addtask do_package_setscene
578
579# Copy from PKGDESTWORK to a temporary directory, as the directory can be cleaned by both
580# do_package_setscene and do_packagedata_setscene, leading to races
581python do_packagedata () {
582 bb.build.exec_func("package_setup_pkgv", d)
583 bb.build.exec_func("package_get_auto_pr", d)
584
585 src = d.expand("${PKGDESTWORK}")
586 dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
587 oe.path.copyhardlinktree(src, dest)
588
589 bb.build.exec_func("packagedata_translate_pr_autoinc", d)
590}
591do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
592
593# Translate the EXTENDPRAUTO and AUTOINC to the final values
594packagedata_translate_pr_autoinc() {
595 find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
596 sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
597 -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
598}
599
600addtask packagedata before do_build after do_package
601
602SSTATETASKS += "do_packagedata"
603do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
604do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
605do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
606
607python do_packagedata_setscene () {
608 sstate_setscene(d)
609}
610addtask do_packagedata_setscene
611
diff --git a/meta/classes-global/package_deb.bbclass b/meta/classes-global/package_deb.bbclass
deleted file mode 100644
index 1f10b15a00..0000000000
--- a/meta/classes-global/package_deb.bbclass
+++ /dev/null
@@ -1,333 +0,0 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "deb"
10
11DPKG_BUILDCMD ??= "dpkg-deb"
12
13DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
14DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
15
16PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
17
18APTCONF_TARGET = "${WORKDIR}"
19
20APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
21
22def debian_arch_map(arch, tune):
23 tune_features = tune.split()
24 if arch == "allarch":
25 return "all"
26 if arch in ["i586", "i686"]:
27 return "i386"
28 if arch == "x86_64":
29 if "mx32" in tune_features:
30 return "x32"
31 return "amd64"
32 if arch.startswith("mips"):
33 endian = ["el", ""]["bigendian" in tune_features]
34 if "n64" in tune_features:
35 return "mips64" + endian
36 if "n32" in tune_features:
37 return "mipsn32" + endian
38 return "mips" + endian
39 if arch == "powerpc":
40 return arch + ["", "spe"]["spe" in tune_features]
41 if arch == "aarch64":
42 return "arm64"
43 if arch == "arm":
44 return arch + ["el", "hf"]["callconvention-hard" in tune_features]
45 return arch
46
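A few sample mappings this function produces, assuming the listed TUNE_FEATURES:

    aarch64                    -> arm64
    arm + callconvention-hard  -> armhf
    x86_64 + mx32              -> x32
    mips + bigendian + n64     -> mips64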
47python do_package_deb () {
48 packages = d.getVar('PACKAGES')
49 if not packages:
50 bb.debug(1, "PACKAGES not defined, nothing to package")
51 return
52
53 tmpdir = d.getVar('TMPDIR')
54 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
55 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
56
57 oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
58}
59do_package_deb[vardeps] += "deb_write_pkg"
60do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
61
62def deb_write_pkg(pkg, d):
63 import re, copy
64 import textwrap
65 import subprocess
66 import collections
67 import codecs
68
69 outdir = d.getVar('PKGWRITEDIRDEB')
70 pkgdest = d.getVar('PKGDEST')
71
72 def cleanupcontrol(root):
73 for p in ['CONTROL', 'DEBIAN']:
74 p = os.path.join(root, p)
75 if os.path.exists(p):
76 bb.utils.prunedir(p)
77
78 localdata = bb.data.createCopy(d)
79 root = "%s/%s" % (pkgdest, pkg)
80
81 lf = bb.utils.lockfile(root + ".lock")
82 try:
83
84 localdata.setVar('ROOT', '')
85 localdata.setVar('ROOT_%s' % pkg, root)
86 pkgname = localdata.getVar('PKG:%s' % pkg)
87 if not pkgname:
88 pkgname = pkg
89 localdata.setVar('PKG', pkgname)
90
91 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
92
93 basedir = os.path.join(os.path.dirname(root))
94
95 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
96 bb.utils.mkdirhier(pkgoutdir)
97
98 os.chdir(root)
99 cleanupcontrol(root)
100 from glob import glob
101 g = glob('*')
102 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
103 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
104 return
105
106 controldir = os.path.join(root, 'DEBIAN')
107 bb.utils.mkdirhier(controldir)
108 os.chmod(controldir, 0o755)
109
110 ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
111
112 fields = []
113 pe = d.getVar('PKGE')
114 if pe and int(pe) > 0:
115 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
116 else:
117 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
118 fields.append(["Description: %s\n", ['DESCRIPTION']])
119 fields.append(["Section: %s\n", ['SECTION']])
120 fields.append(["Priority: %s\n", ['PRIORITY']])
121 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
122 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
123 fields.append(["OE: %s\n", ['PN']])
124 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
125 if d.getVar('HOMEPAGE'):
126 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
127
128 # Package, Version, Maintainer, Description - mandatory
129 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
130
131
132 def pullData(l, d):
133 l2 = []
134 for i in l:
135 data = d.getVar(i)
136 if data is None:
137 raise KeyError(i)
138 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
139 data = 'all'
140 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
141 # The fields in a deb package's control file don't allow the
142 # `_' character, so change `_' in the arch to `-', e.g. `x86_64'
143 # --> `x86-64'
144 data = data.replace('_', '-')
145 l2.append(data)
146 return l2
147
148 ctrlfile.write("Package: %s\n" % pkgname)
149 if d.getVar('PACKAGE_ARCH') == "all":
150 ctrlfile.write("Multi-Arch: foreign\n")
151 # check for required fields
152 for (c, fs) in fields:
153 # Special behavior for description...
154 if 'DESCRIPTION' in fs:
155 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
156 ctrlfile.write('Description: %s\n' % summary)
157 description = localdata.getVar('DESCRIPTION') or "."
158 description = textwrap.dedent(description).strip()
159 if '\\n' in description:
160 # Manually indent
161 for t in description.split('\\n'):
162 ctrlfile.write(' %s\n' % (t.strip() or '.'))
163 else:
164 # Auto indent
165 ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
166
167 else:
168 ctrlfile.write(c % tuple(pullData(fs, localdata)))
169
170 # more fields
171
172 custom_fields_chunk = oe.packagedata.get_package_additional_metadata("deb", localdata)
173 if custom_fields_chunk:
174 ctrlfile.write(custom_fields_chunk)
175 ctrlfile.write("\n")
176
177 oe.packagedata.mapping_rename_hook(localdata)
178
179 def debian_cmp_remap(var):
180 # dpkg does not allow for '(', ')' or ':' in a dependency name
181 # Replace any instances of them with '__'
182 #
183 # In debian, '>' and '<' do not mean what they appear to mean:
184 # '<' = less than or equal
185 # '>' = greater than or equal
186 # so adjust these to the '<<' and '>>' equivalents.
187 # Also, "=" specifiers only work if they include the PR, so 1.2.3 != 1.2.3-r0;
188 # to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
189 for dep in list(var.keys()):
190 if '(' in dep or '/' in dep:
191 newdep = re.sub(r'[(:)/]', '__', dep)
192 if newdep.startswith("__"):
193 newdep = "A" + newdep
194 if newdep != dep:
195 var[newdep] = var[dep]
196 del var[dep]
197 for dep in var:
198 for i, v in enumerate(var[dep]):
199 if (v or "").startswith("< "):
200 var[dep][i] = var[dep][i].replace("< ", "<< ")
201 elif (v or "").startswith("> "):
202 var[dep][i] = var[dep][i].replace("> ", ">> ")
203 elif (v or "").startswith("= ") and "-r" not in v:
204 ver = var[dep][i].replace("= ", "")
205 var[dep][i] = var[dep][i].replace("= ", ">= ")
206 var[dep].append("<< " + ver + ".0")
207
208 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
209 debian_cmp_remap(rdepends)
210 for dep in list(rdepends.keys()):
211 if dep == pkg:
212 del rdepends[dep]
213 continue
214 if '*' in dep:
215 del rdepends[dep]
216 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
217 debian_cmp_remap(rrecommends)
218 for dep in list(rrecommends.keys()):
219 if '*' in dep:
220 del rrecommends[dep]
221 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
222 debian_cmp_remap(rsuggests)
223 # Deliberately drop version information here, not wanted/supported by deb
224 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
225 # Remove file paths, if any, from rprovides; debian does not support custom providers
226 for key in list(rprovides.keys()):
227 if key.startswith('/'):
228 del rprovides[key]
229 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
230 debian_cmp_remap(rprovides)
231 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
232 debian_cmp_remap(rreplaces)
233 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
234 debian_cmp_remap(rconflicts)
235 if rdepends:
236 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
237 if rsuggests:
238 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
239 if rrecommends:
240 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
241 if rprovides:
242 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
243 if rreplaces:
244 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
245 if rconflicts:
246 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
247 ctrlfile.close()
248
249 for script in ["preinst", "postinst", "prerm", "postrm"]:
250 scriptvar = localdata.getVar('pkg_%s' % script)
251 if not scriptvar:
252 continue
253 scriptvar = scriptvar.strip()
254 scriptfile = open(os.path.join(controldir, script), 'w')
255
256 if scriptvar.startswith("#!"):
257 pos = scriptvar.find("\n") + 1
258 scriptfile.write(scriptvar[:pos])
259 else:
260 pos = 0
261 scriptfile.write("#!/bin/sh\n")
262
263 # Prevent the prerm/postrm scripts from being run during an upgrade
264 if script in ('prerm', 'postrm'):
265 scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
266
267 scriptfile.write(scriptvar[pos:])
268 scriptfile.write('\n')
269 scriptfile.close()
270 os.chmod(os.path.join(controldir, script), 0o755)
271
272 conffiles_str = ' '.join(oe.package.get_conffiles(pkg, d))
273 if conffiles_str:
274 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
275 for f in conffiles_str.split():
276 if os.path.exists(oe.path.join(root, f)):
277 conffiles.write('%s\n' % f)
278 conffiles.close()
279
280 os.chdir(basedir)
281 subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
282 root, pkgoutdir),
283 stderr=subprocess.STDOUT,
284 shell=True)
285
286 finally:
287 cleanupcontrol(root)
288 bb.utils.unlockfile(lf)
289
290# Otherwise allarch packages may change depending on override configuration
291deb_write_pkg[vardepsexclude] = "OVERRIDES"
292
293# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
294DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
295do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
296
297SSTATETASKS += "do_package_write_deb"
298do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
299do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
300
301python do_package_write_deb_setscene () {
302 tmpdir = d.getVar('TMPDIR')
303
304 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
305 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
306
307 sstate_setscene(d)
308}
309addtask do_package_write_deb_setscene
310
311python () {
312 if d.getVar('PACKAGES') != '':
313 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
314 d.appendVarFlag('do_package_write_deb', 'depends', deps)
315 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
316
317 # Needed to ensure PKG_xxx renaming of dependency packages works
318 d.setVarFlag('do_package_write_deb', 'deptask', "do_packagedata")
319 d.setVarFlag('do_package_write_deb', 'rdeptask', "do_packagedata")
320}
321
322python do_package_write_deb () {
323 bb.build.exec_func("read_subpackage_metadata", d)
324 bb.build.exec_func("do_package_deb", d)
325}
326do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
327do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
328do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
329addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
330do_build[rdeptask] += "do_package_write_deb"
331
332PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
333PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes-global/package_ipk.bbclass b/meta/classes-global/package_ipk.bbclass
deleted file mode 100644
index 3e72c4c494..0000000000
--- a/meta/classes-global/package_ipk.bbclass
+++ /dev/null
@@ -1,300 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "ipk"
10
11IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
12IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
13IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
14
15PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
16
17# Program to be used to build opkg packages
18OPKGBUILDCMD ??= 'opkg-build -Z zstd -a "${ZSTD_DEFAULTS}"'
19
20OPKG_ARGS += "--force-postinstall --prefer-arch-to-version"
21OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
22OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
23
24OPKGLIBDIR ??= "${localstatedir}/lib"
25
26python do_package_ipk () {
27 workdir = d.getVar('WORKDIR')
28 outdir = d.getVar('PKGWRITEDIRIPK')
29 tmpdir = d.getVar('TMPDIR')
30 pkgdest = d.getVar('PKGDEST')
31 if not workdir or not outdir or not tmpdir:
32 bb.error("Variables incorrectly set, unable to package")
33 return
34
35 packages = d.getVar('PACKAGES')
36 if not packages or packages == '':
37 bb.debug(1, "No packages; nothing to do")
38 return
39
40 # We're about to add new packages, so the index needs to be refreshed;
41 # remove the appropriate stamp file.
42 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
43 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
44
45 oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
46}
47do_package_ipk[vardeps] += "ipk_write_pkg"
48do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
49
50# FILE isn't included by default but we want the recipe to change if basename() changes
51IPK_RECIPE_FILE = "${@os.path.basename(d.getVar('FILE'))}"
52IPK_RECIPE_FILE[vardepvalue] = "${IPK_RECIPE_FILE}"
53
54def ipk_write_pkg(pkg, d):
55 import re, copy
56 import subprocess
57 import textwrap
58 import collections
59 import glob
60
61 def cleanupcontrol(root):
62 for p in ['CONTROL', 'DEBIAN']:
63 p = os.path.join(root, p)
64 if os.path.exists(p):
65 bb.utils.prunedir(p)
66
67 outdir = d.getVar('PKGWRITEDIRIPK')
68 pkgdest = d.getVar('PKGDEST')
69 recipesource = d.getVar('IPK_RECIPE_FILE')
70
71 localdata = bb.data.createCopy(d)
72 root = "%s/%s" % (pkgdest, pkg)
73
74 lf = bb.utils.lockfile(root + ".lock")
75 try:
76 localdata.setVar('ROOT', '')
77 localdata.setVar('ROOT_%s' % pkg, root)
78 pkgname = localdata.getVar('PKG:%s' % pkg)
79 if not pkgname:
80 pkgname = pkg
81 localdata.setVar('PKG', pkgname)
82
83 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
84
85 basedir = os.path.join(os.path.dirname(root))
86 arch = localdata.getVar('PACKAGE_ARCH')
87
88 if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
89 # Spread packages across subdirectories so each isn't too crowded
90 if pkgname.startswith('lib'):
91 pkg_prefix = 'lib' + pkgname[3]
92 else:
93 pkg_prefix = pkgname[0]
94
95 # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
96 # together. These package suffixes are taken from the definitions of
97 # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
98 if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
99 pkg_subdir = pkgname[:-4]
100 elif pkgname.endswith('-staticdev'):
101 pkg_subdir = pkgname[:-10]
102 elif pkgname.endswith('-locale'):
103 pkg_subdir = pkgname[:-7]
104 elif '-locale-' in pkgname:
105 pkg_subdir = pkgname[:pkgname.find('-locale-')]
106 else:
107 pkg_subdir = pkgname
108
109 pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
110 else:
111 pkgoutdir = "%s/%s" % (outdir, arch)
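        # Illustration (hypothetical package names): with IPK_HIERARCHICAL_FEED = "1",
        # libz1 lands in <outdir>/<arch>/libz/libz1/ and busybox-dev joins
        # busybox in <outdir>/<arch>/b/busybox/; the default is the flat
        # <outdir>/<arch>/ layout.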
112
113 bb.utils.mkdirhier(pkgoutdir)
114 os.chdir(root)
115 cleanupcontrol(root)
116 g = glob.glob('*')
117 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
118 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
119 return
120
121 controldir = os.path.join(root, 'CONTROL')
122 bb.utils.mkdirhier(controldir)
123 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
124
125 fields = []
126 pe = d.getVar('PKGE')
127 if pe and int(pe) > 0:
128 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
129 else:
130 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
131 fields.append(["Description: %s\n", ['DESCRIPTION']])
132 fields.append(["Section: %s\n", ['SECTION']])
133 fields.append(["Priority: %s\n", ['PRIORITY']])
134 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
135 fields.append(["License: %s\n", ['LICENSE']])
136 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
137 fields.append(["OE: %s\n", ['PN']])
138 if d.getVar('HOMEPAGE'):
139 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
140
141 def pullData(l, d):
142 l2 = []
143 for i in l:
144 l2.append(d.getVar(i))
145 return l2
146
147 ctrlfile.write("Package: %s\n" % pkgname)
148 # check for required fields
149 for (c, fs) in fields:
150 for f in fs:
151 if localdata.getVar(f, False) is None:
152 raise KeyError(f)
153 # Special behavior for description...
154 if 'DESCRIPTION' in fs:
155 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
156 ctrlfile.write('Description: %s\n' % summary)
157 description = localdata.getVar('DESCRIPTION') or "."
158 description = textwrap.dedent(description).strip()
159 if '\\n' in description:
160 # Manually indent: multiline description includes a leading space
161 for t in description.split('\\n'):
162 ctrlfile.write(' %s\n' % (t.strip() or ' .'))
163 else:
164 # Auto indent
165 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
166 else:
167 ctrlfile.write(c % tuple(pullData(fs, localdata)))
168
169 custom_fields_chunk = oe.packagedata.get_package_additional_metadata("ipk", localdata)
170 if custom_fields_chunk is not None:
171 ctrlfile.write(custom_fields_chunk)
172 ctrlfile.write("\n")
173
174 oe.packagedata.mapping_rename_hook(localdata)
175
176 def debian_cmp_remap(var):
177 # In debian '>' and '<' do not mean what they appear to mean
178 # '<' = less or equal
179 # '>' = greater or equal
180 # adjust these to the '<<' and '>>' equivalents
181 # Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
182 # so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
183 for dep in var:
184 for i, v in enumerate(var[dep]):
185 if (v or "").startswith("< "):
186 var[dep][i] = var[dep][i].replace("< ", "<< ")
187 elif (v or "").startswith("> "):
188 var[dep][i] = var[dep][i].replace("> ", ">> ")
189 elif (v or "").startswith("= ") and "-r" not in v:
190 ver = var[dep][i].replace("= ", "")
191 var[dep][i] = var[dep][i].replace("= ", ">= ")
192 var[dep].append("<< " + ver + ".0")
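        # Hypothetical illustration of the remapping performed above:
        #   {"foo": ["> 1.0"]}   -> {"foo": [">> 1.0"]}
        #   {"foo": ["< 1.0"]}   -> {"foo": ["<< 1.0"]}
        #   {"foo": ["= 1.2.3"]} -> {"foo": [">= 1.2.3", "<< 1.2.3.0"]}
        # whereas "= 1.2.3-r0" (containing "-r") is left untouched.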
193
194 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
195 debian_cmp_remap(rdepends)
196 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
197 debian_cmp_remap(rrecommends)
198 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
199 debian_cmp_remap(rsuggests)
200 # Deliberately drop version information here, not wanted/supported by ipk
201 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
202 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
203 debian_cmp_remap(rprovides)
204 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
205 debian_cmp_remap(rreplaces)
206 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
207 debian_cmp_remap(rconflicts)
208
209 if rdepends:
210 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
211 if rsuggests:
212 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
213 if rrecommends:
214 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
215 if rprovides:
216 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
217 if rreplaces:
218 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
219 if rconflicts:
220 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
221 ctrlfile.write("Source: %s\n" % recipesource)
222 ctrlfile.close()
223
224 for script in ["preinst", "postinst", "prerm", "postrm"]:
225 scriptvar = localdata.getVar('pkg_%s' % script)
226 if not scriptvar:
227 continue
228 scriptfile = open(os.path.join(controldir, script), 'w')
229 scriptfile.write(scriptvar)
230 scriptfile.close()
231 os.chmod(os.path.join(controldir, script), 0o755)
232
233 conffiles_str = ' '.join(oe.package.get_conffiles(pkg, d))
234 if conffiles_str:
235 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
236 for f in conffiles_str.split():
237 if os.path.exists(oe.path.join(root, f)):
238 conffiles.write('%s\n' % f)
239 conffiles.close()
240
241 os.chdir(basedir)
242 subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
243 d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
244 stderr=subprocess.STDOUT,
245 shell=True)
246
247 if d.getVar('IPK_SIGN_PACKAGES') == '1':
248 ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
249 ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
250 sign_ipk(d, ipk_to_sign)
251
252 finally:
253 cleanupcontrol(root)
254 bb.utils.unlockfile(lf)
255
256# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
257IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
258ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
259
260# Otherwise allarch packages may change depending on override configuration
261ipk_write_pkg[vardepsexclude] = "OVERRIDES"
262
263
264SSTATETASKS += "do_package_write_ipk"
265do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
266do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
267
268python do_package_write_ipk_setscene () {
269 tmpdir = d.getVar('TMPDIR')
270
271 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
272 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
273
274 sstate_setscene(d)
275}
276addtask do_package_write_ipk_setscene
277
278python () {
279 if d.getVar('PACKAGES') != '':
280 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot zstd-native:do_populate_sysroot'
281 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
282 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
283
284 # Needed to ensure PKG_xxx renaming of dependency packages works
285 d.setVarFlag('do_package_write_ipk', 'deptask', "do_packagedata")
286 d.setVarFlag('do_package_write_ipk', 'rdeptask', "do_packagedata")
287}
288
289python do_package_write_ipk () {
290 bb.build.exec_func("read_subpackage_metadata", d)
291 bb.build.exec_func("do_package_ipk", d)
292}
293do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
294do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
295do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
296addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
297do_build[rdeptask] += "do_package_write_ipk"
298
299PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
300PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes-global/package_pkgdata.bbclass b/meta/classes-global/package_pkgdata.bbclass
deleted file mode 100644
index f653bd9240..0000000000
--- a/meta/classes-global/package_pkgdata.bbclass
+++ /dev/null
@@ -1,173 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
8
9def package_populate_pkgdata_dir(pkgdatadir, d):
10 import glob
11
12 postinsts = []
13 seendirs = set()
14 stagingdir = d.getVar("PKGDATA_DIR")
15 pkgarchs = ['${MACHINE_ARCH}']
16 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
17 pkgarchs.append('allarch')
18
19 bb.utils.mkdirhier(pkgdatadir)
20 for pkgarch in pkgarchs:
21 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
22 with open(manifest, "r") as f:
23 for l in f:
24 l = l.strip()
25 dest = l.replace(stagingdir, "")
26 if l.endswith("/"):
27 staging_copydir(l, pkgdatadir, dest, seendirs)
28 continue
29 try:
30 staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
31 except FileExistsError:
32 continue
33
34python package_prepare_pkgdata() {
35 import copy
36 import glob
37
38 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
39 mytaskname = d.getVar("BB_RUNTASK")
40 if mytaskname.endswith("_setscene"):
41 mytaskname = mytaskname.replace("_setscene", "")
42 workdir = d.getVar("WORKDIR")
43 pn = d.getVar("PN")
44 stagingdir = d.getVar("PKGDATA_DIR")
45 pkgdatadir = d.getVar("WORKDIR_PKGDATA")
46
47 # Detect bitbake -b usage
48 nodeps = d.getVar("BB_LIMITEDDEPS") or False
49 if nodeps:
50 package_populate_pkgdata_dir(pkgdatadir, d)
51 return
52
53 start = None
54 configuredeps = []
55 for dep in taskdepdata:
56 data = taskdepdata[dep]
57 if data[1] == mytaskname and data[0] == pn:
58 start = dep
59 break
60 if start is None:
61 bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
62
63 # We need to figure out which sysroot files we need to expose to this task.
64 # This needs to match what would get restored from sstate, which is controlled
65 # ultimately by calls from bitbake to setscene_depvalid().
66 # That function expects a setscene dependency tree. We build a dependency tree
67 # condensed to inter-sstate task dependencies, similar to that used by setscene
68 # tasks. We can then call into setscene_depvalid() and decide
69 # which dependencies we can "see" and should expose in the recipe specific sysroot.
70 setscenedeps = copy.deepcopy(taskdepdata)
71
72 start = set([start])
73
74 sstatetasks = d.getVar("SSTATETASKS").split()
75 # Add recipe specific tasks referenced by setscene_depvalid()
76 sstatetasks.append("do_stash_locale")
77
78 # If start is an sstate task (like do_package) we need to add in its direct dependencies
79 # else the code below won't recurse into them.
80 for dep in set(start):
81 for dep2 in setscenedeps[dep][3]:
82 start.add(dep2)
83 start.remove(dep)
84
85 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
86 for dep in taskdepdata:
87 data = setscenedeps[dep]
88 if data[1] not in sstatetasks:
89 for dep2 in setscenedeps:
90 data2 = setscenedeps[dep2]
91 if dep in data2[3]:
92 data2[3].update(setscenedeps[dep][3])
93 data2[3].remove(dep)
94 if dep in start:
95 start.update(setscenedeps[dep][3])
96 start.remove(dep)
97 del setscenedeps[dep]
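    # Illustration (hypothetical tasks): if A:do_packagedata depends on
    # B:do_install, which depends on C:do_packagedata, folding the non-sstate
    # do_install node leaves A:do_packagedata depending on C:do_packagedata.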
98
99 # Remove circular references
100 for dep in setscenedeps:
101 if dep in setscenedeps[dep][3]:
102 setscenedeps[dep][3].remove(dep)
103
104 # Direct dependencies should be present and can be depended upon
105 for dep in set(start):
106 if setscenedeps[dep][1] == "do_packagedata":
107 if dep not in configuredeps:
108 configuredeps.append(dep)
109
110 msgbuf = []
111 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
112 # for ones that would be restored from sstate.
113 done = list(start)
114 next = list(start)
115 while next:
116 new = []
117 for dep in next:
118 data = setscenedeps[dep]
119 for datadep in data[3]:
120 if datadep in done:
121 continue
122 taskdeps = {}
123 taskdeps[dep] = setscenedeps[dep][:2]
124 taskdeps[datadep] = setscenedeps[datadep][:2]
125 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
126 done.append(datadep)
127 new.append(datadep)
128 if retval:
129 msgbuf.append("Skipping setscene dependency %s" % datadep)
130 continue
131 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
132 configuredeps.append(datadep)
133 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
134 else:
135 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
136 next = new
137
138 # This logging is too verbose for day to day use sadly
139 #bb.debug(2, "\n".join(msgbuf))
140
141 seendirs = set()
142 postinsts = []
143 multilibs = {}
144 manifests = {}
145
146 msg_adding = []
147
148 for dep in configuredeps:
149 c = setscenedeps[dep][0]
150 msg_adding.append(c)
151
152 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
153 destsysroot = pkgdatadir
154
155 if manifest:
156 targetdir = destsysroot
157 with open(manifest, "r") as f:
158 manifests[dep] = manifest
159 for l in f:
160 l = l.strip()
161 dest = targetdir + l.replace(stagingdir, "")
162 if l.endswith("/"):
163 staging_copydir(l, targetdir, dest, seendirs)
164 continue
165 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
166
167 bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
168
169}
170package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
171package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
172
173
diff --git a/meta/classes-global/package_rpm.bbclass b/meta/classes-global/package_rpm.bbclass
deleted file mode 100644
index f383ed140e..0000000000
--- a/meta/classes-global/package_rpm.bbclass
+++ /dev/null
@@ -1,776 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "rpm"
10
11RPM = "rpm"
12RPMBUILD = "rpmbuild"
13RPMBUILD_COMPMODE ?= "${@'w%dT%d.zstdio' % (int(d.getVar('ZSTD_COMPRESSION_LEVEL')), int(d.getVar('ZSTD_THREADS')))}"
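# e.g. with ZSTD_COMPRESSION_LEVEL = "19" and ZSTD_THREADS = "8" (example
# values) this expands to "w19T8.zstdio".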
14
15PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
16
17# Maintaining the perfile dependencies has significant overhead when writing the
18# packages. When set, this value merges them for efficiency.
19MERGEPERFILEDEPS = "1"
20
21# Filter dependencies based on a provided function.
22def filter_deps(var, f):
23 import collections
24
25 depends_dict = bb.utils.explode_dep_versions2(var)
26 newdeps_dict = collections.OrderedDict()
27 for dep in depends_dict:
28 if f(dep):
29 newdeps_dict[dep] = depends_dict[dep]
30 return bb.utils.join_deps(newdeps_dict, commasep=False)
31
32# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
33# dependencies for nativesdk packages.
34def filter_nativesdk_deps(srcname, var):
35 if var and srcname.startswith("nativesdk-"):
36 var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
37 return var
38
39# Construct per file dependencies file
40def write_rpm_perfiledata(srcname, d):
41 import oe.package
42 workdir = d.getVar('WORKDIR')
43 packages = d.getVar('PACKAGES')
44 pkgd = d.getVar('PKGD')
45
46 def dump_filerdeps(varname, outfile, d):
47 outfile.write("#!/usr/bin/env python3\n\n")
48 outfile.write("# Dependency table\n")
49 outfile.write('deps = {\n')
50 for pkg in packages.split():
51 dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
52 dependsflist = (d.getVar(dependsflist_key) or "")
53 for dfile in dependsflist.split():
54 key = "FILE" + varname + ":" + dfile + ":" + pkg
55 deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
56 depends_dict = bb.utils.explode_dep_versions(deps)
57 file = oe.package.file_reverse_translate(dfile)
58 outfile.write('"' + pkgd + file + '" : "')
59 for dep in depends_dict:
60 ver = depends_dict[dep]
61 if dep and ver:
62 ver = ver.replace("(", "")
63 ver = ver.replace(")", "")
64 outfile.write(dep + " " + ver + " ")
65 else:
66 outfile.write(dep + " ")
67 outfile.write('",\n')
68 outfile.write('}\n\n')
69 outfile.write("import sys\n")
70 outfile.write("while 1:\n")
71 outfile.write("\tline = sys.stdin.readline().strip()\n")
72 outfile.write("\tif not line:\n")
73 outfile.write("\t\tsys.exit(0)\n")
74 outfile.write("\tif line in deps:\n")
75 outfile.write("\t\tprint(deps[line] + '\\n')\n")
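    # The generated helper is a small script of the form (sketch, with
    # hypothetical entries):
    #   deps = { "<PKGD>/usr/bin/foo" : "libbar 1.0 ", ... }
    # followed by a loop that reads file names on stdin and prints the
    # matching deps entry; rpmbuild runs it via __find_requires/__find_provides.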
76
77 # OE-core dependencies a.k.a. RPM requires
78 outdepends = workdir + "/" + srcname + ".requires"
79
80 dependsfile = open(outdepends, 'w')
81
82 dump_filerdeps('RDEPENDS', dependsfile, d)
83
84 dependsfile.close()
85 os.chmod(outdepends, 0o755)
86
87 # OE-core / RPM Provides
88 outprovides = workdir + "/" + srcname + ".provides"
89
90 providesfile = open(outprovides, 'w')
91
92 dump_filerdeps('RPROVIDES', providesfile, d)
93
94 providesfile.close()
95 os.chmod(outprovides, 0o755)
96
97 return (outdepends, outprovides)
98
99
100python write_specfile () {
101 import oe.packagedata
102 import os,pwd,grp,stat
103
104 # append information for logs and patches to %prep
105 def add_prep(d, spec_files_bottom):
106 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
107 spec_files_bottom.append('%%prep')
108 spec_files_bottom.append("echo \"include logs and patches, Please check them in SOURCES\"")
109 spec_files_bottom.append('')
110
111 # append the name of the tarball to the 'SOURCE' keyword in the .spec file.
112 def tail_source(d):
113 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
114 ar_outdir = d.getVar('ARCHIVER_OUTDIR')
115 if not os.path.exists(ar_outdir):
116 return
117 source_list = os.listdir(ar_outdir)
118 source_number = 0
119 for source in source_list:
120 # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
121 # exist in ARCHIVER_OUTDIR so skip if present.
122 if source.endswith(".src.rpm"):
123 continue
124 # rpmbuild doesn't need root permission, but it does need to know
125 # each file's user and group names; when working under fakeroot the
126 # only user and group available is "root".
127 f = os.path.join(ar_outdir, source)
128 os.chown(f, 0, 0)
129 spec_preamble_top.append('Source%s: %s' % (source_number, source))
130 source_number += 1
131
132 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
133 # This format is similar to OE, however there are restrictions on the
134 # characters that can be in a field. In the Version field, "-"
135 # characters are not allowed. "-" is allowed in the Release field.
136 #
137 # We translate the "-" in the version to a "+", by loading the PKGV
138 # from the dependent recipe, replacing the - with a +, and then using
139 # that value to do a replace inside of this recipe's dependencies.
140 # This preserves the "-" separator between the version and release, as
141 # well as any "-" characters inside of the release field.
142 #
143 # All of this has to happen BEFORE the mapping_rename_hook as
144 # after renaming we cannot look up the dependencies in the packagedata
145 # store.
146 def translate_vers(varname, d):
147 depends = d.getVar(varname)
148 if depends:
149 depends_dict = bb.utils.explode_dep_versions2(depends)
150 newdeps_dict = {}
151 for dep in depends_dict:
152 verlist = []
153 for ver in depends_dict[dep]:
154 if '-' in ver:
155 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
156 if 'PKGV' in subd:
157 pv = subd['PV']
158 pkgv = subd['PKGV']
159 reppv = pkgv.replace('-', '+')
160 if ver.startswith(pv):
161 ver = ver.replace(pv, reppv)
162 ver = ver.replace(pkgv, reppv)
163 if 'PKGR' in subd:
164 # Make sure ver contains PKGR rather than PR
165 pr = '-' + subd['PR']
166 pkgr = '-' + subd['PKGR']
167 if pkgr not in ver:
168 ver = ver.replace(pr, pkgr)
169 verlist.append(ver)
170 else:
171 verlist.append(ver)
172 newdeps_dict[dep] = verlist
173 depends = bb.utils.join_deps(newdeps_dict)
174 d.setVar(varname, depends.strip())
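    # Example (hypothetical): if the dependent recipe has PKGV "1.2-3", a
    # dependency "foo (= 1.2-3-r0)" becomes "foo (= 1.2+3-r0)", keeping "-"
    # only as the version/release separator.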
175
176 # We need to change the style of the dependencies from BB to RPM
177 # This needs to happen AFTER the mapping_rename_hook
178 def print_deps(variable, tag, array, d):
179 depends = variable
180 if depends:
181 depends_dict = bb.utils.explode_dep_versions2(depends)
182 for dep in depends_dict:
183 for ver in depends_dict[dep]:
184 ver = ver.replace('(', '')
185 ver = ver.replace(')', '')
186 array.append("%s: %s %s" % (tag, dep, ver))
187 if not len(depends_dict[dep]):
188 array.append("%s: %s" % (tag, dep))
189
190 def walk_files(walkpath, target, conffiles):
191 # We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
192 # when packaging. We just ignore these files which are created in
193 # packages-split/ and not package/
194 # We have the odd situation where the CONTROL/DEBIAN directory can be removed in the middle
195 # of the walk; the isdir() test would then fail and the walk code would assume it's a file,
196 # hence we check for the names in files too.
197 for rootpath, dirs, files in os.walk(walkpath):
198 def get_attr(path):
199 stat_f = os.stat(rootpath + "/" + path, follow_symlinks=False)
200 mode = stat.S_IMODE(stat_f.st_mode)
201 try:
202 owner = pwd.getpwuid(stat_f.st_uid).pw_name
203 except Exception as e:
204 filename = d.getVar('RECIPE_SYSROOT') + '/etc/passwd'
205 if os.path.exists(filename):
206 bb.error("Content of /etc/passwd in sysroot:\n{}".format(
207 open(filename).read()))
208 else:
209 bb.error("File {} doesn't exist in sysroot!".format(filename))
210 raise e
211 try:
212 group = grp.getgrgid(stat_f.st_gid).gr_name
213 except Exception as e:
214 filename = d.getVar("RECIPE_SYSROOT") +"/etc/group"
215 if os.path.exists(filename):
216 bb.error("Content of /etc/group in sysroot:\n{}".format(
217 open(filename).read()))
218 else:
219 bb.error("File {} doesn't exist in sysroot!".format(filename))
220 raise e
221 return "%attr({:o},{},{}) ".format(mode, owner, group)
222
223 def escape_chars(p):
224 return p.replace("%", "%%").replace("\\", "\\\\").replace('"', '\\"')
225
226 path = rootpath.replace(walkpath, "")
227 if path.endswith("DEBIAN") or path.endswith("CONTROL"):
228 continue
229
230 # Treat all symlinks to directories as normal files.
231 # os.walk() lists them as directories.
232 def move_to_files(dir):
233 if os.path.islink(os.path.join(rootpath, dir)):
234 files.append(dir)
235 return True
236 else:
237 return False
238 dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
239
240 for dir in dirs:
241 if dir == "CONTROL" or dir == "DEBIAN":
242 continue
243 p = path + '/' + dir
244 # All packages own the directories their files are in...
245 target.append(get_attr(dir) + '%dir "' + escape_chars(p) + '"')
246
247 for file in files:
248 if file == "CONTROL" or file == "DEBIAN":
249 continue
250 attr = get_attr(file)
251 p = path + '/' + file
252 if conffiles.count(p):
253 target.append(attr + '%config "' + escape_chars(p) + '"')
254 else:
255 target.append(attr + '"' + escape_chars(p) + '"')
256
257 # Prevent the prerm/postrm scripts from being run during an upgrade
258 def wrap_uninstall(scriptvar):
259 scr = scriptvar.strip()
260 if scr.startswith("#!"):
261 pos = scr.find("\n") + 1
262 else:
263 pos = 0
264 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
265 return scr
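    # e.g. (hypothetical scriptlet) "#!/bin/sh\ndo_cleanup" becomes:
    #   #!/bin/sh
    #   if [ "$1" = "0" ] ; then
    #   do_cleanup
    #   fi
    # rpm passes $1 = 0 on erase and $1 >= 1 on upgrade, so the body only
    # runs on a genuine uninstall.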
266
267 def get_perfile(varname, pkg, d):
268 deps = []
269 dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
270 dependsflist = (d.getVar(dependsflist_key) or "")
271 for dfile in dependsflist.split():
272 key = "FILE" + varname + ":" + dfile + ":" + pkg
273 depends = d.getVar(key)
274 if depends:
275 deps.append(depends)
276 return " ".join(deps)
277
278 def append_description(spec_preamble, text):
279 """
280 Add the description to the spec file.
281 """
282 import textwrap
283 dedent_text = textwrap.dedent(text).strip()
284 # Bitbake saves "\n" as "\\n"
285 if '\\n' in dedent_text:
286 for t in dedent_text.split('\\n'):
287 spec_preamble.append(t.strip())
288 else:
289 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
290
291 packages = d.getVar('PACKAGES')
292 if not packages or packages == '':
293 bb.debug(1, "No packages; nothing to do")
294 return
295
296 pkgdest = d.getVar('PKGDEST')
297 if not pkgdest:
298 bb.fatal("No PKGDEST")
299
300 outspecfile = d.getVar('OUTSPECFILE')
301 if not outspecfile:
302 bb.fatal("No OUTSPECFILE")
303
304 # Construct the SPEC file...
305 srcname = d.getVar('PN')
306 localdata = bb.data.createCopy(d)
307 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
308 srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
309 srcversion = localdata.getVar('PKGV').replace('-', '+')
310 srcrelease = localdata.getVar('PKGR')
311 srcepoch = (localdata.getVar('PKGE') or "")
312 srclicense = localdata.getVar('LICENSE')
313 srcsection = localdata.getVar('SECTION')
314 srcmaintainer = localdata.getVar('MAINTAINER')
315 srchomepage = localdata.getVar('HOMEPAGE')
316 srcdescription = localdata.getVar('DESCRIPTION') or "."
317 srccustomtagschunk = oe.packagedata.get_package_additional_metadata("rpm", localdata)
318
319 srcdepends = d.getVar('DEPENDS')
320 srcrdepends = ""
321 srcrrecommends = ""
322 srcrsuggests = ""
323 srcrprovides = ""
324 srcrreplaces = ""
325 srcrconflicts = ""
326 srcrobsoletes = ""
327
328 srcrpreinst = []
329 srcrpostinst = []
330 srcrprerm = []
331 srcrpostrm = []
332
333 spec_preamble_top = []
334 spec_preamble_bottom = []
335
336 spec_scriptlets_top = []
337 spec_scriptlets_bottom = []
338
339 spec_files_top = []
340 spec_files_bottom = []
341
342 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
343 extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
344
345 for pkg in packages.split():
346 localdata = bb.data.createCopy(d)
347
348 root = "%s/%s" % (pkgdest, pkg)
349
350 localdata.setVar('ROOT', '')
351 localdata.setVar('ROOT_%s' % pkg, root)
352 pkgname = localdata.getVar('PKG:%s' % pkg)
353 if not pkgname:
354 pkgname = pkg
355 localdata.setVar('PKG', pkgname)
356
357 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
358
359 conffiles = oe.package.get_conffiles(pkg, d)
360
361 splitname = pkgname
362
363 splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
364 splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
365 splitrelease = (localdata.getVar('PKGR') or "")
366 splitepoch = (localdata.getVar('PKGE') or "")
367 splitlicense = (localdata.getVar('LICENSE') or "")
368 splitsection = (localdata.getVar('SECTION') or "")
369 splitdescription = (localdata.getVar('DESCRIPTION') or ".")
370 splitcustomtagschunk = oe.packagedata.get_package_additional_metadata("rpm", localdata)
371
372 translate_vers('RDEPENDS', localdata)
373 translate_vers('RRECOMMENDS', localdata)
374 translate_vers('RSUGGESTS', localdata)
375 translate_vers('RPROVIDES', localdata)
376 translate_vers('RREPLACES', localdata)
377 translate_vers('RCONFLICTS', localdata)
378
379 # Map the dependencies into their final form
380 oe.packagedata.mapping_rename_hook(localdata)
381
382 splitrdepends = localdata.getVar('RDEPENDS') or ""
383 splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
384 splitrsuggests = localdata.getVar('RSUGGESTS') or ""
385 splitrprovides = localdata.getVar('RPROVIDES') or ""
386 splitrreplaces = localdata.getVar('RREPLACES') or ""
387 splitrconflicts = localdata.getVar('RCONFLICTS') or ""
388 splitrobsoletes = ""
389
390 splitrpreinst = localdata.getVar('pkg_preinst')
391 splitrpostinst = localdata.getVar('pkg_postinst')
392 splitrprerm = localdata.getVar('pkg_prerm')
393 splitrpostrm = localdata.getVar('pkg_postrm')
394
395
396 if not perfiledeps:
397 # Add in summary of per file dependencies
398 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
399 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
400
401 splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
402
403 # Gather special src/first package data
404 if srcname == splitname:
405 archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
406 bb.data.inherits_class('archiver', d)
407 if archiving and srclicense != splitlicense:
408 bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
409
410 srclicense = splitlicense
411 srcrdepends = splitrdepends
412 srcrrecommends = splitrrecommends
413 srcrsuggests = splitrsuggests
414 srcrprovides = splitrprovides
415 srcrreplaces = splitrreplaces
416 srcrconflicts = splitrconflicts
417
418 srcrpreinst = splitrpreinst
419 srcrpostinst = splitrpostinst
420 srcrprerm = splitrprerm
421 srcrpostrm = splitrpostrm
422
423 file_list = []
424 walk_files(root, file_list, conffiles)
425 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
426 bb.note("Not creating empty RPM package for %s" % splitname)
427 else:
428 spec_files_top.append('%files')
429 if extra_pkgdata:
430 package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
431 spec_files_top.append('%defattr(-,-,-,-)')
432 if file_list:
433 bb.note("Creating RPM package for %s" % splitname)
434 spec_files_top.extend(file_list)
435 else:
436 bb.note("Creating empty RPM package for %s" % splitname)
437 spec_files_top.append('')
438 continue
439
440 # Process subpackage data
441 spec_preamble_bottom.append('%%package -n %s' % splitname)
442 spec_preamble_bottom.append('Summary: %s' % splitsummary)
443 if srcversion != splitversion:
444 spec_preamble_bottom.append('Version: %s' % splitversion)
445 if srcrelease != splitrelease:
446 spec_preamble_bottom.append('Release: %s' % splitrelease)
447 if srcepoch != splitepoch:
448 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
449 spec_preamble_bottom.append('License: %s' % splitlicense)
450 spec_preamble_bottom.append('Group: %s' % splitsection)
451
452 if srccustomtagschunk != splitcustomtagschunk:
453 spec_preamble_bottom.append(splitcustomtagschunk)
454
455 # Replaces == Obsoletes && Provides
456 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
457 rprovides = bb.utils.explode_dep_versions2(splitrprovides)
458 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
459 for dep in rreplaces:
460 if dep not in robsoletes:
461 robsoletes[dep] = rreplaces[dep]
462 if dep not in rprovides:
463 rprovides[dep] = rreplaces[dep]
464 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
465 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
466
467 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
468 if splitrpreinst:
469 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
470 if splitrpostinst:
471 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
472 if splitrprerm:
473 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
474 if splitrpostrm:
475 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
476
477 print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
478 print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
479 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
480 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
481 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
482
483 spec_preamble_bottom.append('')
484
485 spec_preamble_bottom.append('%%description -n %s' % splitname)
486 append_description(spec_preamble_bottom, splitdescription)
487
488 spec_preamble_bottom.append('')
489
490 # Now process scriptlets
491 if splitrpreinst:
492 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
493 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
494 spec_scriptlets_bottom.append(splitrpreinst)
495 spec_scriptlets_bottom.append('')
496 if splitrpostinst:
497 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
498 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
499 spec_scriptlets_bottom.append(splitrpostinst)
500 spec_scriptlets_bottom.append('')
501 if splitrprerm:
502 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
503 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
504 scriptvar = wrap_uninstall(splitrprerm)
505 spec_scriptlets_bottom.append(scriptvar)
506 spec_scriptlets_bottom.append('')
507 if splitrpostrm:
508 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
509 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
510 scriptvar = wrap_uninstall(splitrpostrm)
511 spec_scriptlets_bottom.append(scriptvar)
512 spec_scriptlets_bottom.append('')
513
514 # Now process files
515 file_list = []
516 walk_files(root, file_list, conffiles)
517 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
518 bb.note("Not creating empty RPM package for %s" % splitname)
519 else:
520 spec_files_bottom.append('%%files -n %s' % splitname)
521 if extra_pkgdata:
522 package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
523 spec_files_bottom.append('%defattr(-,-,-,-)')
524 if file_list:
525 bb.note("Creating RPM package for %s" % splitname)
526 spec_files_bottom.extend(file_list)
527 else:
528 bb.note("Creating empty RPM package for %s" % splitname)
529 spec_files_bottom.append('')
530
531 del localdata
532
533 add_prep(d, spec_files_bottom)
534 spec_preamble_top.append('Summary: %s' % srcsummary)
535 spec_preamble_top.append('Name: %s' % srcname)
536 spec_preamble_top.append('Version: %s' % srcversion)
537 spec_preamble_top.append('Release: %s' % srcrelease)
538 if srcepoch and srcepoch.strip() != "":
539 spec_preamble_top.append('Epoch: %s' % srcepoch)
540 spec_preamble_top.append('License: %s' % srclicense)
541 spec_preamble_top.append('Group: %s' % srcsection)
542 spec_preamble_top.append('Packager: %s' % srcmaintainer)
543 if srchomepage:
544 spec_preamble_top.append('URL: %s' % srchomepage)
545 if srccustomtagschunk:
546 spec_preamble_top.append(srccustomtagschunk)
547 tail_source(d)
548
549 # Replaces == Obsoletes && Provides
550 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
551 rprovides = bb.utils.explode_dep_versions2(srcrprovides)
552 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
553 for dep in rreplaces:
554 if dep not in robsoletes:
555 robsoletes[dep] = rreplaces[dep]
556 if dep not in rprovides:
557 rprovides[dep] = rreplaces[dep]
558 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
559 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
560
561 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
562 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
563 if srcrpreinst:
564 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
565 if srcrpostinst:
566 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
567 if srcrprerm:
568 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
569 if srcrpostrm:
570 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
571
572 print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
573 print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
574 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
575 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
576 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
577
578 spec_preamble_top.append('')
579
580 spec_preamble_top.append('%description')
581 append_description(spec_preamble_top, srcdescription)
582
583 spec_preamble_top.append('')
584
585 if srcrpreinst:
586 spec_scriptlets_top.append('%pre')
587 spec_scriptlets_top.append('# %s - preinst' % srcname)
588 spec_scriptlets_top.append(srcrpreinst)
589 spec_scriptlets_top.append('')
590 if srcrpostinst:
591 spec_scriptlets_top.append('%post')
592 spec_scriptlets_top.append('# %s - postinst' % srcname)
593 spec_scriptlets_top.append(srcrpostinst)
594 spec_scriptlets_top.append('')
595 if srcrprerm:
596 spec_scriptlets_top.append('%preun')
597 spec_scriptlets_top.append('# %s - prerm' % srcname)
598 scriptvar = wrap_uninstall(srcrprerm)
599 spec_scriptlets_top.append(scriptvar)
600 spec_scriptlets_top.append('')
601 if srcrpostrm:
602 spec_scriptlets_top.append('%postun')
603 spec_scriptlets_top.append('# %s - postrm' % srcname)
604 scriptvar = wrap_uninstall(srcrpostrm)
605 spec_scriptlets_top.append(scriptvar)
606 spec_scriptlets_top.append('')
607
608 # Write the SPEC file
609 specfile = open(outspecfile, 'w')
610
611 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
612 # of the generated spec file
613 external_preamble = d.getVar("RPMSPEC_PREAMBLE")
614 if external_preamble:
615 specfile.write(external_preamble + "\n")
616
617 for line in spec_preamble_top:
618 specfile.write(line + "\n")
619
620 for line in spec_preamble_bottom:
621 specfile.write(line + "\n")
622
623 for line in spec_scriptlets_top:
624 specfile.write(line + "\n")
625
626 for line in spec_scriptlets_bottom:
627 specfile.write(line + "\n")
628
629 for line in spec_files_top:
630 specfile.write(line + "\n")
631
632 for line in spec_files_bottom:
633 specfile.write(line + "\n")
634
635 specfile.close()
636}
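# For reference, the generated spec (hypothetical skeleton) follows the write
# order above:
#   Name/Version/Release/...          (spec_preamble_top)
#   %package -n <subpkg> ...          (spec_preamble_bottom)
#   %pre/%post/%preun/%postun         (spec_scriptlets_top, then _bottom)
#   %files [-n <subpkg>] ...          (spec_files_top, then _bottom)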
637# Otherwise allarch packages may change depending on override configuration
638write_specfile[vardepsexclude] = "OVERRIDES"
639
640# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
641RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
642write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
643
644python do_package_rpm () {
645 workdir = d.getVar('WORKDIR')
646 tmpdir = d.getVar('TMPDIR')
647 pkgd = d.getVar('PKGD')
648 if not workdir or not pkgd or not tmpdir:
649 bb.error("Variables incorrectly set, unable to package")
650 return
651
652 packages = d.getVar('PACKAGES')
653 if not packages or packages == '':
654 bb.debug(1, "No packages; nothing to do")
655 return
656
657 # Construct the spec file...
658 # If the spec file already exists and has not been stored in
659 # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
660 # so remove it before running rpmbuild.
661 srcname = d.getVar('PN')
662 outspecfile = workdir + "/" + srcname + ".spec"
663 if os.path.isfile(outspecfile):
664 os.remove(outspecfile)
665 d.setVar('OUTSPECFILE', outspecfile)
666 bb.build.exec_func('write_specfile', d)
667
668 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
669 if perfiledeps:
670 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
671
672 # Setup the rpmbuild arguments...
673 rpmbuild = d.getVar('RPMBUILD')
674 rpmbuild_compmode = d.getVar('RPMBUILD_COMPMODE')
675 rpmbuild_extra_params = d.getVar('RPMBUILD_EXTRA_PARAMS') or ""
676
677 # Too many places in the dnf stack assume that arch-independent packages are "noarch".
678 # Let's not fight against this.
679 package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
680 if package_arch == "all":
681 package_arch = "noarch"
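    # e.g. PACKAGE_ARCH "all" becomes "noarch" and "qemux86-64" becomes
    # "qemux86_64" (example values).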
682
683 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
684 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
685 d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
686 bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
687 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
688 bb.utils.mkdirhier(pkgwritedir)
689 os.chmod(pkgwritedir, 0o755)
690
691 cmd = rpmbuild
692 cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
693 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
694 cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
695 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
696 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
697 cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
698 cmd = cmd + " --define '_build_id_links none'"
699 cmd = cmd + " --define '_smp_ncpus_max 4'"
700 cmd = cmd + " --define '_source_payload %s'" % rpmbuild_compmode
701 cmd = cmd + " --define '_binary_payload %s'" % rpmbuild_compmode
702 cmd = cmd + " --define 'build_mtime_policy clamp_to_source_date_epoch'"
703 cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
704 cmd = cmd + " --define '_buildhost reproducible'"
705 cmd = cmd + " --define '__font_provides %{nil}'"
706 if perfiledeps:
707 cmd = cmd + " --define '__find_requires " + outdepends + "'"
708 cmd = cmd + " --define '__find_provides " + outprovides + "'"
709 else:
710 cmd = cmd + " --define '__find_requires %{nil}'"
711 cmd = cmd + " --define '__find_provides %{nil}'"
712 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
713 cmd = cmd + " --define 'debug_package %{nil}'"
714 cmd = cmd + " --define '_tmppath " + workdir + "'"
715 cmd = cmd + " --define '_use_weak_usergroup_deps 1'"
716 cmd = cmd + " --define '_passwd_path " + "/completely/bogus/path" + "'"
717 cmd = cmd + " --define '_group_path " + "/completely/bogus/path" + "'"
718 cmd = cmd + rpmbuild_extra_params
719 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
720 cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
721 cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
722 cmdsrpm = cmdsrpm + " -bs " + outspecfile
723 # Build the .src.rpm
724 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
725 d.setVarFlag('SBUILDSPEC', 'func', '1')
726 bb.build.exec_func('SBUILDSPEC', d)
727 cmd = cmd + " -bb " + outspecfile
728
729 # rpm 4 creates various empty directories in _topdir, let's clean them up
730 cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
731
732 # Build the rpm package!
733 d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
734 d.setVarFlag('BUILDSPEC', 'func', '1')
735 bb.build.exec_func('BUILDSPEC', d)
736
737 if d.getVar('RPM_SIGN_PACKAGES') == '1':
738 bb.build.exec_func("sign_rpm", d)
739}
740
741python () {
742 if d.getVar('PACKAGES') != '':
743 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
744 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
745 d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
746
747 # Needed to ensure PKG_xxx renaming of dependency packages works
748 d.setVarFlag('do_package_write_rpm', 'deptask', "do_packagedata")
749 d.setVarFlag('do_package_write_rpm', 'rdeptask', "do_packagedata")
750}
751
752SSTATETASKS += "do_package_write_rpm"
753do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
754do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
755# Take a shared lock: we can write multiple packages at the same time,
756# but we need to stop the rootfs/solver from running while we do.
757do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
758
759python do_package_write_rpm_setscene () {
760 sstate_setscene(d)
761}
762addtask do_package_write_rpm_setscene
763
764python do_package_write_rpm () {
765 bb.build.exec_func("read_subpackage_metadata", d)
766 bb.build.exec_func("do_package_rpm", d)
767}
768
769do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
770do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
771do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
772addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
773do_build[rdeptask] += "do_package_write_rpm"
774
775PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
776PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
diff --git a/meta/classes-global/packagedata.bbclass b/meta/classes-global/packagedata.bbclass
deleted file mode 100644
index 9f72c01d77..0000000000
--- a/meta/classes-global/packagedata.bbclass
+++ /dev/null
@@ -1,40 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python read_subpackage_metadata () {
8 import oe.packagedata
9
10 vars = {
11 "PN" : d.getVar('PN'),
12 "PE" : d.getVar('PE'),
13 "PV" : d.getVar('PV'),
14 "PR" : d.getVar('PR'),
15 }
16
17 data = oe.packagedata.read_pkgdata(vars["PN"], d)
18
19 for key in data.keys():
20 d.setVar(key, data[key])
21
22 for pkg in d.getVar('PACKAGES').split():
23 sdata = oe.packagedata.read_subpkgdata(pkg, d)
24 for key in sdata.keys():
25 if key in vars:
26 if sdata[key] != vars[key]:
27 if key == "PN":
28 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
29 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
30 continue
31 #
32 # If we set unsuffixed variables here there is a chance they could clobber override versions
33 # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
34 # We therefore don't clobber for the unsuffixed variable versions
35 #
36 if key.endswith(":" + pkg):
37 d.setVar(key, sdata[key])
38 else:
39 d.setVar(key, sdata[key], parsing=True)
40}
diff --git a/meta/classes-global/patch.bbclass b/meta/classes-global/patch.bbclass
deleted file mode 100644
index e5786b1c9a..0000000000
--- a/meta/classes-global/patch.bbclass
+++ /dev/null
@@ -1,169 +0,0 @@
1# Copyright (C) 2006 OpenedHand LTD
2#
3# SPDX-License-Identifier: MIT
4
5# Point to an empty file so any user's custom settings don't break things
6QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
7
8PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
9
10# There is a bug in patch 2.7.3 and earlier where index lines
11# in patches can change file modes when they shouldn't:
12# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
13# This leaks into debug sources in particular. Add the dependency
14# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
15PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"
16
17PATCH_GIT_USER_NAME ?= "OpenEmbedded"
18PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
19
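# A minimal configuration sketch (example values) selecting git as the patch
# tool and enabling the commit handling driven below:
#   PATCHTOOL = "git"
#   PATCH_COMMIT_FUNCTIONS = "1"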
20inherit terminal
21
22python () {
23 if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
24 extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
25 try:
26 extratasks.remove('do_unpack')
27 except ValueError:
28 # For some recipes do_unpack doesn't exist, ignore it
29 pass
30
31 d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
32 for task in extratasks:
33 d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
34}
35
36python patch_task_patch_prefunc() {
37 # Prefunc for do_patch
38 srcsubdir = d.getVar('S')
39
40 workdir = os.path.abspath(d.getVar('WORKDIR'))
41 testsrcdir = os.path.abspath(srcsubdir)
42 if (testsrcdir + os.sep).startswith(workdir + os.sep):
43 # Double-check that either workdir or S or some directory in-between is a git repository
44 found = False
45 while testsrcdir != workdir:
46 if os.path.exists(os.path.join(testsrcdir, '.git')):
47 found = True
48 break
49 if testsrcdir == workdir:
50 break
51 testsrcdir = os.path.dirname(testsrcdir)
52 if not found:
53 bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
54
55 patchdir = os.path.join(srcsubdir, 'patches')
56 if os.path.exists(patchdir):
57 if os.listdir(patchdir):
58 d.setVar('PATCH_HAS_PATCHES_DIR', '1')
59 else:
60 os.rmdir(patchdir)
61}
62
63python patch_task_postfunc() {
64 # Postfunc for task functions between do_unpack and do_patch
65 import oe.patch
66 import shutil
67 func = d.getVar('BB_RUNTASK')
68 srcsubdir = d.getVar('S')
69
70 if os.path.exists(srcsubdir):
71 if func == 'do_patch':
72 haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
73 patchdir = os.path.join(srcsubdir, 'patches')
74 if os.path.exists(patchdir):
75 shutil.rmtree(patchdir)
76 if haspatches:
77 stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
78 if stdout:
79 bb.process.run('git checkout patches', cwd=srcsubdir)
80 stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
81 if stdout:
82 oe.patch.GitApplyTree.commitIgnored("Add changes from %s" % func, dir=srcsubdir, files=['.'], d=d)
83}
84
85def src_patches(d, all=False, expand=True):
86 import oe.patch
87 return oe.patch.src_patches(d, all, expand)
88
89def should_apply(parm, d):
90 """Determine if we should apply the given patch"""
91 import oe.patch
92 return oe.patch.should_apply(parm, d)
93
94should_apply[vardepsexclude] = "DATE SRCDATE"
95
96python patch_do_patch() {
97 import oe.patch
98
99 patchsetmap = {
100 "patch": oe.patch.PatchTree,
101 "quilt": oe.patch.QuiltTree,
102 "git": oe.patch.GitApplyTree,
103 }
104
105 cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
106
107 resolvermap = {
108 "noop": oe.patch.NOOPResolver,
109 "user": oe.patch.UserResolver,
110 }
111
112 rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
113
114 classes = {}
115
116 s = d.getVar('S')
117
118 os.putenv('PATH', d.getVar('PATH'))
119
120 # We must use one TMPDIR per process so that the "patch" processes
121 # don't generate the same temp file name.
122
123 import tempfile
124 process_tmpdir = tempfile.mkdtemp()
125 os.environ['TMPDIR'] = process_tmpdir
126
127 for patch in src_patches(d):
128 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
129
130 if "patchdir" in parm:
131 patchdir = parm["patchdir"]
132 if not os.path.isabs(patchdir):
133 patchdir = os.path.join(s, patchdir)
134 if not os.path.isdir(patchdir):
135 bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
136 (patchdir, parm["patchdir"], parm['patchname']))
137 else:
138 patchdir = s
139
140 if patchdir not in classes:
141 patchset = cls(patchdir, d)
142 resolver = rcls(patchset, oe_terminal)
143 classes[patchdir] = (patchset, resolver)
144 patchset.Clean()
145 else:
146 patchset, resolver = classes[patchdir]
147
148 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
149 try:
150 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
151 except Exception as exc:
152 bb.utils.remove(process_tmpdir, True)
153 bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
154 try:
155 resolver.Resolve()
156 except bb.BBHandledException as e:
157 bb.utils.remove(process_tmpdir, True)
158 bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))
159
160 bb.utils.remove(process_tmpdir, True)
161 del os.environ['TMPDIR']
162}
163patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
164
165addtask patch after do_unpack
166do_patch[dirs] = "${WORKDIR}"
167do_patch[depends] = "${PATCHDEPENDENCY}"
168
169EXPORT_FUNCTIONS do_patch
diff --git a/meta/classes-global/retain.bbclass b/meta/classes-global/retain.bbclass
deleted file mode 100644
index 46e8c256cf..0000000000
--- a/meta/classes-global/retain.bbclass
+++ /dev/null
@@ -1,182 +0,0 @@
1# Creates a tarball of the work directory (or any other nominated
2# directories) for a recipe when one of its tasks fails.
3# Useful in cases where the environment in which builds are run is
4# ephemeral or otherwise inaccessible for examination during
5# debugging.
6#
7# To enable, simply add the following to your configuration:
8#
9# INHERIT += "retain"
10#
11# You can specify the recipe-specific directories to save upon failure
12# or always (space-separated) e.g.:
13#
14# RETAIN_DIRS_FAILURE = "${WORKDIR};prefix=workdir" # default
15# RETAIN_DIRS_ALWAYS = "${T}"
16#
17# Naturally you can use overrides to limit it to a specific recipe:
18# RETAIN_DIRS_ALWAYS:pn-somerecipe = "${T}"
19#
20# You can also specify global (non-recipe-specific) directories to save:
21#
22# RETAIN_DIRS_GLOBAL_FAILURE = "${LOG_DIR}"
23# RETAIN_DIRS_GLOBAL_ALWAYS = "${BUILDSTATS_BASE}"
24#
25# If you wish to use a different tarball name prefix than the default of
26# the directory name, you can do so by specifying a ;prefix= followed by
27# the desired prefix (no spaces) in any of the RETAIN_DIRS_* variables.
28 # e.g. to always save the log files with "recipelogs" as the prefix for
29# the tarball of ${T} you would do this:
30#
31# RETAIN_DIRS_ALWAYS = "${T};prefix=recipelogs"
32#
33# Notes:
34# * For this to be useful you also need corresponding logic in your build
35# orchestration tool to pick up any files written out to RETAIN_OUTDIR
36# (with the other assumption being that no files are present there at
37# the start of the build, since there is no logic to purge old files).
38# * Work directories can be quite large, so saving them can take some time
39# and of course space.
40# * Tarball creation is deferred to the end of the build, thus you will
41# get the state at the end, not immediately upon failure.
42# * Extra directories must naturally be populated at the time the retain
43 # class goes to save them (build completion); to try to ensure this for
44# things that are also saved on build completion (e.g. buildstats), put
45# the INHERIT += "retain" after the INHERIT += lines for the class that
46# is writing out the data that you wish to save.
47# * The tarballs have the tarball name as a top-level directory so that
48# multiple tarballs can be extracted side-by-side easily.
49#
50# Copyright (c) 2020, 2024 Microsoft Corporation
51#
52# SPDX-License-Identifier: GPL-2.0-only
53#
54
55RETAIN_OUTDIR ?= "${TMPDIR}/retained"
56RETAIN_DIRS_FAILURE ?= "${WORKDIR};prefix=workdir"
57RETAIN_DIRS_ALWAYS ?= ""
58RETAIN_DIRS_GLOBAL_FAILURE ?= ""
59RETAIN_DIRS_GLOBAL_ALWAYS ?= ""
60RETAIN_TARBALL_SUFFIX ?= "${DATETIME}.tar.gz"
61RETAIN_ENABLED ?= "1"
62
63
64def retain_retain_dir(desc, tarprefix, path, tarbasepath, d):
65 import datetime
66
67 outdir = d.getVar('RETAIN_OUTDIR')
68 bb.utils.mkdirhier(outdir)
69 suffix = d.getVar('RETAIN_TARBALL_SUFFIX')
70 tarname = '%s_%s' % (tarprefix, suffix)
71 tarfp = os.path.join(outdir, '%s' % tarname)
72 tardir = os.path.relpath(path, tarbasepath)
73 cmdargs = ['tar', 'cfa', tarfp]
74 # Prefix paths within the tarball with the tarball name so that
75 # multiple tarballs can be extracted side-by-side
76 tarname_noext = os.path.splitext(tarname)[0]
77 if tarname_noext.endswith('.tar'):
78 tarname_noext = tarname_noext[:-4]
79 cmdargs += ['--transform', 's:^:%s/:' % tarname_noext]
80 cmdargs += [tardir]
81 try:
82 bb.process.run(cmdargs, cwd=tarbasepath)
83 except bb.process.ExecutionError as e:
84 # It is possible for other tasks to be writing to the workdir
85 # while we are tarring it up, in which case tar will return 1,
86 # but we don't care in this situation (tar returns 2 for other
87        # errors, so we will see those)
88 if e.exitcode != 1:
89 bb.warn('retain: error saving %s: %s' % (desc, str(e)))
90
91
92addhandler retain_task_handler
93retain_task_handler[eventmask] = "bb.build.TaskFailed bb.build.TaskSucceeded"
94
95addhandler retain_build_handler
96retain_build_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
97
98python retain_task_handler() {
99 if d.getVar('RETAIN_ENABLED') != '1':
100 return
101
102 dirs = d.getVar('RETAIN_DIRS_ALWAYS')
103 if isinstance(e, bb.build.TaskFailed):
104 dirs += ' ' + d.getVar('RETAIN_DIRS_FAILURE')
105
106 dirs = dirs.strip().split()
107 if dirs:
108 outdir = d.getVar('RETAIN_OUTDIR')
109 bb.utils.mkdirhier(outdir)
110 dirlist_file = os.path.join(outdir, 'retain_dirs.list')
111 pn = d.getVar('PN')
112 taskname = d.getVar('BB_CURRENTTASK')
113 with open(dirlist_file, 'a') as f:
114 for entry in dirs:
115 f.write('%s %s %s\n' % (pn, taskname, entry))
116}
117
118python retain_build_handler() {
119 outdir = d.getVar('RETAIN_OUTDIR')
120 dirlist_file = os.path.join(outdir, 'retain_dirs.list')
121
122 if isinstance(e, bb.event.BuildStarted):
123 if os.path.exists(dirlist_file):
124 os.remove(dirlist_file)
125 return
126
127 if d.getVar('RETAIN_ENABLED') != '1':
128 return
129
130 savedirs = {}
131 try:
132 with open(dirlist_file, 'r') as f:
133 for line in f:
134 pn, _, path = line.rstrip().split()
135                if path not in savedirs:
136 savedirs[path] = pn
137 os.remove(dirlist_file)
138 except FileNotFoundError:
139 pass
140
141 if e.getFailures():
142 for path in (d.getVar('RETAIN_DIRS_GLOBAL_FAILURE') or '').strip().split():
143 savedirs[path] = ''
144
145 for path in (d.getVar('RETAIN_DIRS_GLOBAL_ALWAYS') or '').strip().split():
146 savedirs[path] = ''
147
148 if savedirs:
149 bb.plain('NOTE: retain: retaining build output...')
150 count = 0
151 for path, pn in savedirs.items():
152 prefix = None
153 if ';' in path:
154 pathsplit = path.split(';')
155 path = pathsplit[0]
156 for param in pathsplit[1:]:
157 if '=' in param:
158 name, value = param.split('=', 1)
159 if name == 'prefix':
160 prefix = value
161 else:
162 bb.error('retain: invalid parameter "%s" in RETAIN_* variable value' % param)
163 return
164 else:
165 bb.error('retain: parameter "%s" missing value in RETAIN_* variable value' % param)
166 return
167 if prefix:
168 itemname = prefix
169 else:
170 itemname = os.path.basename(path)
171 if pn:
172 # Always add the recipe name in front
173 itemname = pn + '_' + itemname
174 if os.path.exists(path):
175 retain_retain_dir(itemname, itemname, path, os.path.dirname(path), d)
176 count += 1
177 else:
178 bb.warn('retain: path %s does not currently exist' % path)
179 if count:
180 item = 'archive' if count == 1 else 'archives'
181 bb.plain('NOTE: retain: saved %d %s to %s' % (count, item, outdir))
182}
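A minimal configuration sketch built from the variables documented in the header above; the RETAIN_OUTDIR value is illustrative (the default is ${TMPDIR}/retained):

    INHERIT += "retain"
    # Keep every recipe's task logs, prefixing the tarballs with "recipelogs"
    RETAIN_DIRS_ALWAYS = "${T};prefix=recipelogs"
    # Also capture the global buildstats output at build completion
    RETAIN_DIRS_GLOBAL_ALWAYS = "${BUILDSTATS_BASE}"
    # Write the tarballs somewhere the build orchestrator can collect them
    RETAIN_OUTDIR = "${TOPDIR}/retained"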
diff --git a/meta/classes-global/sanity.bbclass b/meta/classes-global/sanity.bbclass
deleted file mode 100644
index 6934e071a3..0000000000
--- a/meta/classes-global/sanity.bbclass
+++ /dev/null
@@ -1,1114 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Sanity check the users setup for common misconfigurations
9#
10
11SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
12 gzip gawk chrpath wget cpio perl file which"
13
14def bblayers_conf_file(d):
15 return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
16
17def sanity_conf_read(fn):
18 with open(fn, 'r') as f:
19 lines = f.readlines()
20 return lines
21
22def sanity_conf_find_line(pattern, lines):
23 import re
24 return next(((index, line)
25 for index, line in enumerate(lines)
26 if re.search(pattern, line)), (None, None))
27
28def sanity_conf_update(fn, lines, version_var_name, new_version):
29 index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
30 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
31 with open(fn, "w") as f:
32 f.write(''.join(lines))
33
34# Functions added to this variable MUST throw a NotImplementedError exception unless
35# they successfully changed the config version in the config file. Exceptions
36# are used since exec_func doesn't handle return values.
37BBLAYERS_CONF_UPDATE_FUNCS += " \
38 conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
39 conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
40 conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
41"
42
43SANITY_DIFF_TOOL ?= "diff -u"
44
45SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/templates/default/local.conf.sample"
46python oecore_update_localconf() {
47 # Check we are using a valid local.conf
48 current_conf = d.getVar('CONF_VERSION')
49 conf_version = d.getVar('LOCALCONF_VERSION')
50
51 failmsg = """Your version of local.conf was generated from an older/newer version of
52local.conf.sample and there have been updates made to this file. Please compare the two
53files and merge any changes before continuing.
54
55Matching the version numbers will remove this message.
56
57\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\"
58
59is a good way to visualise the changes."""
60 failmsg = d.expand(failmsg)
61
62 raise NotImplementedError(failmsg)
63}
64
65SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/templates/default/site.conf.sample"
66python oecore_update_siteconf() {
67 # If we have a site.conf, check it's valid
68 current_sconf = d.getVar('SCONF_VERSION')
69 sconf_version = d.getVar('SITE_CONF_VERSION')
70
71 failmsg = """Your version of site.conf was generated from an older version of
72site.conf.sample and there have been updates made to this file. Please compare the two
73files and merge any changes before continuing.
74
75Matching the version numbers will remove this message.
76
77\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\"
78
79is a good way to visualise the changes."""
80 failmsg = d.expand(failmsg)
81
82 raise NotImplementedError(failmsg)
83}
84
85SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/templates/default/bblayers.conf.sample"
86python oecore_update_bblayers() {
87 # bblayers.conf is out of date, so see if we can resolve that
88
89 current_lconf = int(d.getVar('LCONF_VERSION'))
90 lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
91
92 failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
93Please compare your file against bblayers.conf.sample and merge any changes before continuing.
94"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}"
95
96is a good way to visualise the changes."""
97 failmsg = d.expand(failmsg)
98
99 if not current_lconf:
100 raise NotImplementedError(failmsg)
101
102 lines = []
103
104 if current_lconf < 4:
105 raise NotImplementedError(failmsg)
106
107 bblayers_fn = bblayers_conf_file(d)
108 lines = sanity_conf_read(bblayers_fn)
109
110 if current_lconf == 4 and lconf_version > 4:
111 topdir_var = '$' + '{TOPDIR}'
112 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
113 if bbpath_line:
114 start = bbpath_line.find('"')
115 if start != -1 and (len(bbpath_line) != (start + 1)):
116 if bbpath_line[start + 1] == '"':
117 lines[index] = (bbpath_line[:start + 1] +
118 topdir_var + bbpath_line[start + 1:])
119 else:
120                    if topdir_var not in bbpath_line:
121 lines[index] = (bbpath_line[:start + 1] +
122 topdir_var + ':' + bbpath_line[start + 1:])
123 else:
124 raise NotImplementedError(failmsg)
125 else:
126 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
127 if bbfiles_line:
128 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
129 else:
130 raise NotImplementedError(failmsg)
131
132 current_lconf += 1
133 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
134 bb.note("Your conf/bblayers.conf has been automatically updated.")
135 return
136
137 elif current_lconf == 5 and lconf_version > 5:
138 # Null update, to avoid issues with people switching between poky and other distros
139 current_lconf = 6
140 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
141 bb.note("Your conf/bblayers.conf has been automatically updated.")
142 return
143
144
145
146 elif current_lconf == 6 and lconf_version > 6:
147 # Handle rename of meta-yocto -> meta-poky
148 # This marks the start of separate version numbers but code is needed in OE-Core
149 # for the migration, one last time.
150 layers = d.getVar('BBLAYERS').split()
151 layers = [ os.path.basename(path) for path in layers ]
152 if 'meta-yocto' in layers:
153 found = False
154 while True:
155 index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
156 if meta_yocto_line:
157 lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
158 found = True
159 else:
160 break
161 if not found:
162 raise NotImplementedError(failmsg)
163 index, meta_yocto_line = sanity_conf_find_line('LCONF_VERSION.*\n', lines)
164 if meta_yocto_line:
165 lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
166 else:
167 raise NotImplementedError(failmsg)
168 with open(bblayers_fn, "w") as f:
169 f.write(''.join(lines))
170 bb.note("Your conf/bblayers.conf has been automatically updated.")
171 return
172 current_lconf += 1
173 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
174 bb.note("Your conf/bblayers.conf has been automatically updated.")
175 return
176
177 raise NotImplementedError(failmsg)
178}
179
180def raise_sanity_error(msg, d, network_error=False):
181 if d.getVar("SANITY_USE_EVENTS") == "1":
182 try:
183 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
184 except TypeError:
185 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
186 return
187
188 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
189 Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
190 Following is the list of potential problems / advisories:
191
192 %s""" % msg)
193
194# Check a single tune for validity.
195def check_toolchain_tune(data, tune, multilib):
196 tune_errors = []
197 if not tune:
198 return "No tuning found for %s multilib." % multilib
199 localdata = bb.data.createCopy(data)
200 if multilib != "default":
201 # Apply the overrides so we can look at the details.
202 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
203 localdata.setVar("OVERRIDES", overrides)
204 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
205 features = (localdata.getVar("TUNE_FEATURES:tune-%s" % tune) or "").split()
206 if not features:
207 return "Tuning '%s' has no defined features, and cannot be used." % tune
208 valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
209 conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
210 # [doc] is the documentation for the variable, not a real feature
211 if 'doc' in valid_tunes:
212 del valid_tunes['doc']
213 if 'doc' in conflicts:
214 del conflicts['doc']
215 for feature in features:
216 if feature in conflicts:
217 for conflict in conflicts[feature].split():
218 if conflict in features:
219 tune_errors.append("Feature '%s' conflicts with '%s'." %
220 (feature, conflict))
221 if feature in valid_tunes:
222 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
223 else:
224 tune_errors.append("Feature '%s' is not defined." % feature)
225 if tune_errors:
226 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
227
228def check_toolchain(data):
229 tune_error_set = []
230 deftune = data.getVar("DEFAULTTUNE")
231 tune_errors = check_toolchain_tune(data, deftune, 'default')
232 if tune_errors:
233 tune_error_set.append(tune_errors)
234
235 multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
236 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
237
238 if multilibs:
239 seen_libs = []
240 seen_tunes = []
241 for lib in multilibs:
242 if lib in seen_libs:
243 tune_error_set.append("The multilib '%s' appears more than once." % lib)
244 else:
245 seen_libs.append(lib)
246            if lib not in global_multilibs:
247 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
248 tune = data.getVar("DEFAULTTUNE:virtclass-multilib-%s" % lib)
249 if tune in seen_tunes:
250 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
251 else:
252                seen_tunes.append(tune)
253 if tune == deftune:
254 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
255 else:
256 tune_errors = check_toolchain_tune(data, tune, lib)
257 if tune_errors:
258 tune_error_set.append(tune_errors)
259 if tune_error_set:
260 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
261
262 return ""
263
264def check_conf_exists(fn, data):
265 bbpath = []
266 fn = data.expand(fn)
267 vbbpath = data.getVar("BBPATH", False)
268 if vbbpath:
269 bbpath += vbbpath.split(":")
270 for p in bbpath:
271 currname = os.path.join(data.expand(p), fn)
272 if os.access(currname, os.R_OK):
273 return True
274 return False
275
276def check_create_long_filename(filepath, pathname):
277 import string, random
278 testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
279 try:
280 if not os.path.exists(filepath):
281 bb.utils.mkdirhier(filepath)
282 f = open(testfile, "w")
283 f.close()
284 os.remove(testfile)
285 except IOError as e:
286 import errno
287 err, strerror = e.args
288 if err == errno.ENAMETOOLONG:
289 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
290 else:
291 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
292 except OSError as e:
293 errno, strerror = e.args
294 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
295 return ""
296
297def check_path_length(filepath, pathname, limit):
298 if len(filepath) > limit:
299 return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
300 return ""
301
302def check_non_ascii(filepath, pathname):
303    if not filepath.isascii():
304 return "Non-ASCII character(s) in %s path (\"%s\") detected. This would cause build failures as we build software that doesn't support this.\n" % (pathname, filepath)
305 return ""
306
307def get_filesystem_id(path):
308 import subprocess
309 try:
310 return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
311 except subprocess.CalledProcessError:
312 bb.warn("Can't get filesystem id of: %s" % path)
313 return None
314
315# Check that the path isn't located on NFS.
316def check_not_nfs(path, name):
317    # The NFS filesystem id is 6969
318    if get_filesystem_id(path) == "6969":
319        return "The %s: %s can't be located on NFS.\n" % (name, path)
320 return ""
321
322# Check that the path is on a case-sensitive file system
323def check_case_sensitive(path, name):
324 import tempfile
325 with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
326 if os.path.exists(tmp_file.name.lower()):
327 return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
328 return ""
329
330# Check that path isn't a broken symlink
331def check_symlink(lnk, data):
332 if os.path.islink(lnk) and not os.path.exists(lnk):
333 raise_sanity_error("%s is a broken symlink." % lnk, data)
334
335def check_connectivity(d):
336 # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable
337 # using the same syntax as for SRC_URI. If the variable is not set
338 # the check is skipped
339 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
340 retval = ""
341
342 bbn = d.getVar('BB_NO_NETWORK')
343 if bbn not in (None, '0', '1'):
344 return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
345
346 # Only check connectivity if network enabled and the
347 # CONNECTIVITY_CHECK_URIS are set
348 network_enabled = not (bbn == '1')
349 check_enabled = len(test_uris)
350 if check_enabled and network_enabled:
351 # Take a copy of the data store and unset MIRRORS and PREMIRRORS
352 data = bb.data.createCopy(d)
353 data.delVar('PREMIRRORS')
354 data.delVar('MIRRORS')
355 try:
356 fetcher = bb.fetch2.Fetch(test_uris, data)
357 fetcher.checkstatus()
358 except Exception as err:
359 # Allow the message to be configured so that users can be
360 # pointed to a support mechanism.
361 msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
362 if len(msg) == 0:
363 msg = "%s.\n" % err
364 msg += " Please ensure your host's network is configured correctly.\n"
365 msg += " Please ensure CONNECTIVITY_CHECK_URIS is correct and specified URIs are available.\n"
366 msg += " If your ISP or network is blocking the above URL,\n"
367 msg += " try with another domain name, for example by setting:\n"
368                msg += "    CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\"\n"
369 msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
370 msg += " access if all required sources are on local disk.\n"
371 retval = msg
372
373 return retval
374
375def check_supported_distro(sanity_data):
376 from fnmatch import fnmatch
377
378 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
379 if not tested_distros:
380 return
381
382 try:
383 distro = oe.lsb.distro_identifier()
384 except Exception:
385 distro = None
386
387 if not distro:
388 bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
389
390 for supported in [x.strip() for x in tested_distros.split('\\n')]:
391 if fnmatch(distro, supported):
392 return
393
394 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
395
396# Checks we should only make if MACHINE is set correctly
397def check_sanity_validmachine(sanity_data):
398 messages = ""
399
400 # Check TUNE_ARCH is set
401 if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
402 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
403
404 # Check TARGET_OS is set
405 if sanity_data.getVar('TARGET_OS') == 'INVALID':
406 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
407
408 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
409 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
410 tunepkg = sanity_data.getVar('TUNE_PKGARCH')
411 defaulttune = sanity_data.getVar('DEFAULTTUNE')
412 tunefound = False
413 seen = {}
414 dups = []
415
416 for pa in pkgarchs.split():
417 if seen.get(pa, 0) == 1:
418 dups.append(pa)
419 else:
420 seen[pa] = 1
421 if pa == tunepkg:
422 tunefound = True
423
424 if len(dups):
425 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
426
427    if not tunefound:
428 messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s)." % (pkgarchs, defaulttune, tunepkg)
429
430 return messages
431
432# Versions of patch before 2.7 can't handle all the features in git-style
433# diffs. Some patches may apply incorrectly, and others won't apply at all.
434def check_patch_version(sanity_data):
435 import re, subprocess
436
437 patch_minimum_version = "2.7"
438
439 try:
440 result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
441 version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
442 if bb.utils.vercmp_string_op(version, patch_minimum_version, "<"):
443 return ("Your version of patch is older than %s and has bugs which will break builds. "
444 "Please install a newer version of patch.\n" % patch_minimum_version)
445 else:
446 return None
447 except subprocess.CalledProcessError as e:
448 return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
449
450# Glibc needs make 4.0 or later, we may as well match at this point
451def check_make_version(sanity_data):
452 make_minimum_version = "4.0"
453 import subprocess
454
455 try:
456 result = subprocess.check_output(['make', '--version'], stderr=subprocess.STDOUT).decode('utf-8')
457 except subprocess.CalledProcessError as e:
458 return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
459 version = result.split()[2]
460 if bb.utils.vercmp_string_op(version, make_minimum_version, "<"):
461 return "Please install a make version of %s or later.\n" % make_minimum_version
462
463 if bb.utils.vercmp_string_op(version, "4.2.1", "=="):
464 distro = oe.lsb.distro_identifier()
465 if "ubuntu" in distro or "debian" in distro or "linuxmint" in distro:
466 return None
467        return "make version 4.2.1 is known to have issues on CentOS/openSUSE and other non-Ubuntu systems. Please use a buildtools-make-tarball or a newer version of make.\n"
468 return None
469
470
471# Check if we're running on WSL (Windows Subsystem for Linux).
472# WSLv1 is known not to work, but WSLv2 should work properly as
473# long as the VHDX file is optimized regularly; let the user know
474# upfront.
475# More information on installing WSLv2 at:
476# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
477def check_wsl(d):
478 with open("/proc/version", "r") as f:
479 verdata = f.readlines()
480 for l in verdata:
481 if "Microsoft" in l:
482 return "OpenEmbedded doesn't work under WSLv1, please upgrade to WSLv2 if you want to run builds on Windows"
483 elif "microsoft" in l:
484 bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
485 return None
486
487def check_userns():
488 """
489 Check that user namespaces are functional, as they're used for network isolation.
490 """
491
492    # There is a known failure case with AppArmor where the unshare() call
493 # succeeds (at which point the uid is nobody) but writing to the uid_map
494 # fails (so the uid isn't reset back to the user's uid). We can detect this.
495 parentuid = os.getuid()
496 if not bb.utils.is_local_uid(parentuid):
497 return None
498 pid = os.fork()
499 if not pid:
500 try:
501 bb.utils.disable_network()
502 except:
503 pass
504 os._exit(parentuid != os.getuid())
505
506 ret = os.waitpid(pid, 0)[1]
507 if ret:
508 bb.fatal("User namespaces are not usable by BitBake, possibly due to AppArmor.\n"
509 "See https://discourse.ubuntu.com/t/ubuntu-24-04-lts-noble-numbat-release-notes/39890#unprivileged-user-namespace-restrictions for more information.")
510
511
512# Require at least gcc version 10.1
513#
514# A less invasive fix is with scripts/install-buildtools (or with user
515# built buildtools-extended-tarball)
516#
517def check_gcc_version(sanity_data):
518 gcc_minimum_version = "10.1"
519 version = oe.utils.get_host_gcc_version(sanity_data)
520 if bb.utils.vercmp_string_op(version, gcc_minimum_version, "<"):
521 return ("Your version of gcc is older than %s and will break builds. Please install a newer "
522 "version of gcc (you could use the project's buildtools-extended-tarball or use "
523 "scripts/install-buildtools).\n" % gcc_minimum_version)
524 return None
525
526# Tar version 1.24 and onwards handle overwriting symlinks correctly
527# but earlier versions do not; this needs to work properly for sstate
528# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
529# GNU tar (gtar) is assumed to be used as tar in poky
530def check_tar_version(sanity_data):
531 tar_minimum_version = "1.28"
532 import subprocess
533 try:
534 result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
535 except subprocess.CalledProcessError as e:
536 return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
537    if "GNU" not in result:
538 return "Your version of tar is not gtar. Please install gtar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
539 version = result.split()[3]
540 if bb.utils.vercmp_string_op(version, tar_minimum_version, "<"):
541 return ("Your version of tar is older than %s and does not have the support needed to enable reproducible "
542 "builds. Please install a newer version of tar (you could use the project's buildtools-tarball from "
543 "our last release or use scripts/install-buildtools).\n" % tar_minimum_version)
544
545 try:
546 result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8')
547 if "--xattrs" not in result:
548 return "Your tar doesn't support --xattrs, please use GNU tar.\n"
549 except subprocess.CalledProcessError as e:
550 return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output)
551
552 return None
553
554# We use git parameters and functionality only found in 1.7.8 or later
555# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
556# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
557def check_git_version(sanity_data):
558 git_minimum_version = "1.8.3.1"
559 import subprocess
560 try:
561 result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
562 except subprocess.CalledProcessError as e:
563 return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
564 version = result.split()[2]
565 if bb.utils.vercmp_string_op(version, git_minimum_version, "<"):
566 return ("Your version of git is older than %s and has bugs which will break builds. "
567 "Please install a newer version of git.\n" % git_minimum_version)
568 return None
569
570# Check the required perl modules which may not be installed by default
571def check_perl_modules(sanity_data):
572 import subprocess
573 ret = ""
574 modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper", "File::Compare", "File::Copy", "open ':std'", "FindBin" )
575 errresult = ''
576 for m in modules:
577 try:
578 subprocess.check_output(["perl", "-e", "use %s" % m])
579 except subprocess.CalledProcessError as e:
580 errresult += bytes.decode(e.output)
581 ret += "%s " % m
582 if ret:
583 return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
584 return None
585
586def sanity_check_conffiles(d):
587 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
588 for func in funcs:
589 conffile, current_version, required_version, func = func.split(":")
590 if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
591 d.getVar(current_version) != d.getVar(required_version):
592 try:
593 bb.build.exec_func(func, d)
594 except NotImplementedError as e:
595 bb.fatal(str(e))
596 d.setVar("BB_INVALIDCONF", True)
597
598def drop_v14_cross_builds(d):
599 import glob
600 indexes = glob.glob(d.expand("${SSTATE_MANIFESTS}/index-${BUILD_ARCH}_*"))
601 for i in indexes:
602 with open(i, "r") as f:
603 lines = f.readlines()
604 for l in reversed(lines):
605 try:
606 (stamp, manifest, workdir) = l.split()
607 except ValueError:
608 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
609 for m in glob.glob(manifest + ".*"):
610 if m.endswith(".postrm"):
611 continue
612 sstate_clean_manifest(m, d)
613 bb.utils.remove(stamp + "*")
614 bb.utils.remove(workdir, recurse = True)
615
616def check_cpp_toolchain_flag(d, flag, error_message=None):
617 """
618 Checks if the g++ compiler supports the given flag
619 """
620 import shlex
621 import subprocess
622
623 cpp_code = """
624 #include <iostream>
625 int main() {
626 std::cout << "Hello, World!" << std::endl;
627 return 0;
628 }
629 """
630
631    cmd = ["g++", "-x", "c++", "-", "-o", "/dev/null", flag]
632 try:
633 subprocess.run(cmd, input=cpp_code, capture_output=True, text=True, check=True)
634 return None
635 except subprocess.CalledProcessError as e:
636 return error_message or f"An unexpected issue occurred during the C++ toolchain check: {str(e)}"
637
638def sanity_handle_abichanges(status, d):
639 #
640 # Check the 'ABI' of TMPDIR
641 #
642 import subprocess
643
644 current_abi = d.getVar('OELAYOUT_ABI')
645 abifile = d.getVar('SANITY_ABIFILE')
646 if os.path.exists(abifile):
647 with open(abifile, "r") as f:
648 abi = f.read().strip()
649 if not abi.isdigit():
650 with open(abifile, "w") as f:
651 f.write(current_abi)
652 elif int(abi) <= 11 and current_abi == "12":
653 status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
654 elif int(abi) <= 13 and current_abi == "14":
655 status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
656 elif int(abi) == 14 and current_abi == "15":
657 drop_v14_cross_builds(d)
658 with open(abifile, "w") as f:
659 f.write(current_abi)
660 elif (abi != current_abi):
661 # Code to convert from one ABI to another could go here if possible.
662 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
663 else:
664 with open(abifile, "w") as f:
665 f.write(current_abi)
666
667def check_sanity_sstate_dir_change(sstate_dir, data):
668 # Sanity checks to be done when the value of SSTATE_DIR changes
669
670 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
671 testmsg = ""
672 if sstate_dir != "":
673 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
674 # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
675 try:
676 err = testmsg.split(': ')[1].strip()
677 if err == "Permission denied.":
678 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
679 except IndexError:
680 pass
681 return testmsg
682
683def check_sanity_version_change(status, d):
684 import glob
685
686 # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
687 # In other words, these tests run once in a given build directory and then
688 # never again until the sanity version or host distribution id/version changes.
689
690    # Check the python install is complete. Examples that are often removed in
691    # minimal installations: glib-2.0-native requires xml.parsers.expat
692 try:
693 import xml.parsers.expat
694 except ImportError as e:
695 status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
696
697 status.addresult(check_gcc_version(d))
698 status.addresult(check_make_version(d))
699 status.addresult(check_patch_version(d))
700 status.addresult(check_tar_version(d))
701 status.addresult(check_git_version(d))
702 status.addresult(check_perl_modules(d))
703 status.addresult(check_wsl(d))
704 status.addresult(check_userns())
705
706 missing = ""
707
708 if not check_app_exists("${MAKE}", d):
709 missing = missing + "GNU make,"
710
711 if not check_app_exists('gcc', d):
712 missing = missing + "C Compiler (gcc),"
713
714 if not check_app_exists('g++', d):
715 missing = missing + "C++ Compiler (g++),"
716
717    # Installing emacs on Ubuntu 24.04 pulls in emacs-gtk -> libgcc-14-dev despite gcc being 13;
718    # this breaks libcxx-native and compiler-rt-native builds, so tell the user to fix things
719 if glob.glob("/usr/lib/gcc/*/14/libgcc_s.so") and not glob.glob("/usr/lib/gcc/*/14/libstdc++.so"):
720 status.addresult('libgcc-14-dev is installed and not libstdc++-14-dev which will break clang native compiles. Please remove one or install the other.')
721
722 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
723
724 for util in required_utilities.split():
725 if not check_app_exists(util, d):
726 missing = missing + "%s," % util
727
728 if missing:
729 missing = missing.rstrip(',')
730 status.addresult("Please install the following missing utilities: %s\n" % missing)
731
732 assume_provided = d.getVar('ASSUME_PROVIDED').split()
733 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
734 if "diffstat-native" not in assume_provided:
735 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
736
737 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
738 import stat
739 tmpdir = d.getVar('TMPDIR')
740 topdir = d.getVar('TOPDIR')
741 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
742 tmpdirmode = os.stat(tmpdir).st_mode
743 if (tmpdirmode & stat.S_ISGID):
744 status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
745 if (tmpdirmode & stat.S_ISUID):
746 status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
747
748 # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
749 pseudoignorepaths = (d.getVar('PSEUDO_IGNORE_PATHS', expand=True) or "").split(",")
750 workdir = d.getVar('WORKDIR', expand=True)
751 for i in pseudoignorepaths:
752 if i and workdir.startswith(i):
753            status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + "; please locate the build outside this path.\n")
754
755 # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
756 pseudoignorepaths = (d.getVar('PSEUDO_IGNORE_PATHS', expand=True) or "").split(",")
757 pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
758 pseudocontroldir = d.expand(pseudo_control_dir).split(",")
759 for i in pseudoignorepaths:
760 for j in pseudocontroldir:
761 if i and j:
762 if j.startswith(i):
763 status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
764
765 # Some third-party software apparently relies on chmod etc. being suid root (!!)
766 import stat
767 suid_check_bins = "chown chmod mknod".split()
768 for bin_cmd in suid_check_bins:
769 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
770 if bin_path:
771 bin_stat = os.stat(bin_path)
772 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
773 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
774
775 # Check that we can fetch from various network transports
776 netcheck = check_connectivity(d)
777 status.addresult(netcheck)
778 if netcheck:
779 status.network_error = True
780
781 nolibs = d.getVar('NO32LIBS')
782 if not nolibs:
783 lib32path = '/lib'
784 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
785 lib32path = '/lib32'
786
787 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
788 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
789
790 bbpaths = d.getVar('BBPATH').split(":")
791 if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
792 status.addresult("BBPATH references the current directory, either through " \
793 "an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
794 "layer configuration is adding empty elements to BBPATH.\n\t "\
795 "Please check your layer.conf files and other BBPATH " \
796 "settings to remove the current working directory " \
797 "references.\n" \
798                        "Parsed BBPATH is " + str(bbpaths))
799
800    oes_bb_conf = d.getVar('OES_BITBAKE_CONF')
801 if not oes_bb_conf:
802 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
803
804 # The length of TMPDIR can't be longer than 400
805 status.addresult(check_path_length(tmpdir, "TMPDIR", 400))
806
807    # Check that TOPDIR does not contain non-ASCII chars (perl_5.40.0, Perl-native and shadow-native build failures)
808 status.addresult(check_non_ascii(topdir, "TOPDIR"))
809
810 # Check that TMPDIR isn't located on nfs
811    # Check that TMPDIR isn't located on NFS
812
813 # Check for case-insensitive file systems (such as Linux in Docker on
814 # macOS with default HFS+ file system)
815 status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
816
817    # Check if linking with -lstdc++ is failing
818 status.addresult(check_cpp_toolchain_flag(d, "-lstdc++"))
819
820    # Check if the C++ toolchain supports the "--std=gnu++20" flag
821 status.addresult(check_cpp_toolchain_flag(d, "--std=gnu++20",
822 "An error occurred during checking the C++ toolchain for '--std=gnu++20' support. "
823 "Please use a g++ compiler that supports C++20 (e.g. g++ version 10 onwards)."))
824
825def sanity_check_locale(d):
826 """
827 Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
828 """
829 import locale
830 try:
831 locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
832 except locale.Error:
833 raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
834
835def check_sanity_everybuild(status, d):
836 import os, stat
837    # Sanity tests which test the user's environment and so need to run at each build (or are
838    # so cheap that it makes sense to always run them).
839
840 if 0 == os.getuid():
841 raise_sanity_error("Do not use Bitbake as root.", d)
842
843    # Check the Python version; we now have a minimum of Python 3.9
844 import sys
845 if sys.hexversion < 0x030900F0:
846 status.addresult('The system requires at least Python 3.9 to run. Please update your Python interpreter.\n')
847
848 # Check the bitbake version meets minimum requirements
849 minversion = d.getVar('BB_MIN_VERSION')
850 if bb.utils.vercmp_string_op(bb.__version__, minversion, "<"):
851 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
852
853 sanity_check_locale(d)
854
855 paths = d.getVar('PATH').split(":")
856 if "." in paths or "./" in paths or "" in paths:
857 status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
858
859 #Check if bitbake is present in PATH environment variable
860 bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
861 if not bb_check:
862 bb.warn("bitbake binary is not found in PATH, did you source the script?")
863
864    # Check whether the 'inherit' directive is used (it is meant for classes and recipes
865    # to inherit bbclasses); in conf files the uppercase INHERIT should be used instead
866 inherit = d.getVar('inherit')
867 if inherit:
868        status.addresult("Please don't use the inherit directive in your local.conf. The directive is supposed to be used in classes and recipes only, to inherit bbclasses. Here INHERIT should be used instead.\n")
869
870 # Check that the DISTRO is valid, if set
871 # need to take into account DISTRO renaming DISTRO
872 distro = d.getVar('DISTRO')
873 if distro and distro != "nodistro":
874 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
875 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
876
877 # Check that these variables don't use tilde-expansion as we don't do that
878 for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
879 if d.getVar(v).startswith("~"):
880 status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
881
882 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
883 # set, since so much relies on it being set.
884 dldir = d.getVar('DL_DIR')
885 if not dldir:
886 status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
887 if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
888 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
889 check_symlink(dldir, d)
890
891 # Check that the MACHINE is valid, if it is set
892 machinevalid = True
893 if d.getVar('MACHINE'):
894 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
895 status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
896 machinevalid = False
897 else:
898 status.addresult(check_sanity_validmachine(d))
899 else:
900 status.addresult('Please set a MACHINE in your local.conf or environment\n')
901 machinevalid = False
902 if machinevalid:
903 status.addresult(check_toolchain(d))
904
905 # Check that the SDKMACHINE is valid, if it is set
906 if d.getVar('SDKMACHINE'):
907 if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
908 status.addresult('Specified SDKMACHINE value is not valid\n')
909 elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
910 status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
911
912 # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
913 sdkvendor = d.getVar("SDK_VENDOR")
914 if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
915 status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
916
917 check_supported_distro(d)
918
919 omask = os.umask(0o022)
920 if omask & 0o755:
921 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
922 os.umask(omask)
923
924 # Ensure /tmp is NOT mounted with noexec
925 if os.statvfs("/tmp").f_flag & os.ST_NOEXEC:
926 raise_sanity_error("/tmp shouldn't be mounted with noexec.", d)
927
928 if d.getVar('TARGET_ARCH') == "arm":
929 # This path is no longer user-readable in modern (very recent) Linux
930 try:
931 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
932 f = open("/proc/sys/vm/mmap_min_addr", "r")
933 try:
934 if (int(f.read().strip()) > 65536):
935 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
936 finally:
937 f.close()
938 except:
939 pass
940
941 for checkdir in ['COREBASE', 'TMPDIR']:
942 val = d.getVar(checkdir)
943 if val.find('..') != -1:
944 status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir)
945 if val.find('+') != -1:
946 status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir)
947 if val.find('@') != -1:
948 status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir)
949 if val.find(' ') != -1:
950 status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir)
951 if val.find('%') != -1:
952 status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir)
953
954 # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
955 import re
956 mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
957 protocols = ['http', 'ftp', 'file', 'https', \
958 'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
959 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', \
960 'az', 'ftps', 'crate', 'gs']
961 for mirror_var in mirror_vars:
962 mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
963
964 # Split into pairs
965 if len(mirrors) % 2 != 0:
966 bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
967 continue
968 mirrors = list(zip(*[iter(mirrors)]*2))
969
970 for mirror_entry in mirrors:
971 pattern, mirror = mirror_entry
972
973 decoded = bb.fetch2.decodeurl(pattern)
974 try:
975 pattern_scheme = re.compile(decoded[0])
976 except re.error as exc:
977 bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
978 continue
979
980 if not any(pattern_scheme.match(protocol) for protocol in protocols):
981 bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
982 continue
983
984 if not any(mirror.startswith(protocol + '://') for protocol in protocols):
985 bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
986 continue
987
988 if mirror.startswith('file://'):
989 import urllib
990 check_symlink(urllib.parse.urlparse(mirror).path, d)
991                # An SSTATE_MIRRORS entry may end with a /PATH string
992 if mirror.endswith('/PATH'):
993 # remove /PATH$ from SSTATE_MIRROR to get a working
994 # base directory path
995 mirror_base = urllib.parse.urlparse(mirror[:-1*len('/PATH')]).path
996 check_symlink(mirror_base, d)
997
998 # Check sstate mirrors aren't being used with a local hash server and no remote
999 hashserv = d.getVar("BB_HASHSERVE")
1000 if d.getVar("SSTATE_MIRRORS") and hashserv and hashserv.startswith("unix://") and not d.getVar("BB_HASHSERVE_UPSTREAM"):
1001 bb.warn("You are using a local hash equivalence server but have configured an sstate mirror. This will likely mean no sstate will match from the mirror. You may wish to disable the hash equivalence use (BB_HASHSERVE), or use a hash equivalence server alongside the sstate mirror.")
1002
1003 # Check that TMPDIR hasn't changed location since the last time we were run
1004 tmpdir = d.getVar('TMPDIR')
1005 checkfile = os.path.join(tmpdir, "saved_tmpdir")
1006 if os.path.exists(checkfile):
1007 with open(checkfile, "r") as f:
1008 saved_tmpdir = f.read().strip()
1009 if (saved_tmpdir != tmpdir):
1010 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
1011 else:
1012 bb.utils.mkdirhier(tmpdir)
1013 # Remove setuid, setgid and sticky bits from TMPDIR
1014 try:
1015 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
1016 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
1017 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
1018 except OSError as exc:
1019 bb.warn("Unable to chmod TMPDIR: %s" % exc)
1020 with open(checkfile, "w") as f:
1021 f.write(tmpdir)
1022
1023 # If /bin/sh is a symlink, check that it points to dash or bash
1024 if os.path.islink('/bin/sh'):
1025 real_sh = os.path.realpath('/bin/sh')
1026 # Due to update-alternatives, the shell name may take various
1027 # forms, such as /bin/dash, bin/bash, /bin/bash.bash ...
1028 if '/dash' not in real_sh and '/bash' not in real_sh:
1029 status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
1030
1031def check_sanity(sanity_data):
1032 class SanityStatus(object):
1033 def __init__(self):
1034 self.messages = ""
1035 self.network_error = False
1036
1037 def addresult(self, message):
1038 if message:
1039 self.messages = self.messages + message
1040
1041 status = SanityStatus()
1042
1043 tmpdir = sanity_data.getVar('TMPDIR')
1044 sstate_dir = sanity_data.getVar('SSTATE_DIR')
1045
1046 check_symlink(sstate_dir, sanity_data)
1047
1048 # Check saved sanity info
1049 last_sanity_version = 0
1050 last_tmpdir = ""
1051 last_sstate_dir = ""
1052 last_nativelsbstr = ""
1053 sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
1054 if os.path.exists(sanityverfile):
1055 with open(sanityverfile, 'r') as f:
1056 for line in f:
1057 if line.startswith('SANITY_VERSION'):
1058 last_sanity_version = int(line.split()[1])
1059 if line.startswith('TMPDIR'):
1060 last_tmpdir = line.split()[1]
1061 if line.startswith('SSTATE_DIR'):
1062 last_sstate_dir = line.split()[1]
1063 if line.startswith('NATIVELSBSTRING'):
1064 last_nativelsbstr = line.split()[1]
1065
1066 check_sanity_everybuild(status, sanity_data)
1067
1068 sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
1069 network_error = False
1070 # NATIVELSBSTRING var may have been overridden with "universal", so
1071 # get actual host distribution id and version
1072 nativelsbstr = lsb_distro_identifier(sanity_data)
1073 if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr:
1074 check_sanity_version_change(status, sanity_data)
1075 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
1076 else:
1077 if last_sstate_dir != sstate_dir:
1078 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
1079
1080 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
1081 with open(sanityverfile, 'w') as f:
1082 f.write("SANITY_VERSION %s\n" % sanity_version)
1083 f.write("TMPDIR %s\n" % tmpdir)
1084 f.write("SSTATE_DIR %s\n" % sstate_dir)
1085 f.write("NATIVELSBSTRING %s\n" % nativelsbstr)
1086
1087 sanity_handle_abichanges(status, sanity_data)
1088
1089 if status.messages != "":
1090 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
1091
1092addhandler config_reparse_eventhandler
1093config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
1094python config_reparse_eventhandler() {
1095 sanity_check_conffiles(e.data)
1096}
1097
1098addhandler check_sanity_eventhandler
1099check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
1100python check_sanity_eventhandler() {
1101 if bb.event.getName(e) == "SanityCheck":
1102 sanity_data = bb.data.createCopy(e.data)
1103 check_sanity(sanity_data)
1104 if e.generateevents:
1105 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1106 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
1107 elif bb.event.getName(e) == "NetworkTest":
1108 sanity_data = bb.data.createCopy(e.data)
1109 if e.generateevents:
1110 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1111 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
1112
1113 return
1114}
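A sketch of the user-facing knobs the connectivity check above reads, as they might appear in local.conf; the URI and message are placeholders, not defaults:

    # Have check_connectivity() probe a known-good URI before each build
    CONNECTIVITY_CHECK_URIS = "https://www.example.com/"
    # Optional message pointing users at a local support channel on failure
    CONNECTIVITY_CHECK_MSG = "Network check failed; contact your build admin."
    # Or skip network access entirely when all sources are available locally
    #BB_NO_NETWORK = "1"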
diff --git a/meta/classes-global/sstate.bbclass b/meta/classes-global/sstate.bbclass
deleted file mode 100644
index 2fd29d7323..0000000000
--- a/meta/classes-global/sstate.bbclass
+++ /dev/null
@@ -1,1372 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "14"
8
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
14def generate_sstatefn(spec, hash, taskname, siginfo, d):
15 if taskname is None:
16 return ""
17 extension = ".tar.zst"
18 # 8 chars reserved for siginfo
19 limit = 254 - 8
20 if siginfo:
21 limit = 254
22 extension = ".tar.zst.siginfo"
23 if not hash:
24 hash = "INVALID"
25 fn = spec + hash + "_" + taskname + extension
26 # If the filename is too long, attempt to reduce it
27 if len(fn) > limit:
28 components = spec.split(":")
29 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
30 # 7 is for the separators
31 avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
32 components[2] = components[2][:avail]
33 components[3] = components[3][:avail]
34 components[4] = components[4][:avail]
35 spec = ":".join(components)
36 fn = spec + hash + "_" + taskname + extension
37 if len(fn) > limit:
38        bb.fatal("Unable to reduce sstate name to less than 255 characters")
39 return hash[:2] + "/" + hash[2:4] + "/" + fn
40
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
49
50# explicitly make PV depend on the evaluated value of the PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
54# of the system; we let the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
58# Avoid docbook/sgml catalog warnings for now
59SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
60# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
61SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
62SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
63# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
64SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
65# Archive the sources for many architectures in one deploy folder
66SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
67# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
69SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
72
73SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
74SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
75SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
76SSTATE_HASHEQUIV_FILEMAP ?= " \
77 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
78 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
79 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_INCLUDE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
80 populate_sysroot:*/crossscripts/*:${TMPDIR} \
81 populate_sysroot:*/crossscripts/*:${COREBASE} \
82 "
83
84BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
85
86SSTATE_ARCHS_TUNEPKG ??= "${TUNE_PKGARCH}"
87SSTATE_ARCHS = " \
88 ${BUILD_ARCH} \
89 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
90 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
91 ${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX} \
93 allarch \
94 ${SSTATE_ARCHS_TUNEPKG} \
95 ${PACKAGE_EXTRA_ARCHS} \
96 ${MACHINE_ARCH}"
97SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
98
99SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
100
101SSTATECREATEFUNCS += "sstate_hardcode_path"
102SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
103SSTATEPOSTCREATEFUNCS = ""
104SSTATEPREINSTFUNCS = ""
105SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
106EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
107
108# Check whether sstate exists for tasks that support sstate and are in the
109# locked signatures file.
110SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
111
112# Check whether the task's computed hash matches the task's hash in the
113# locked signatures file.
114SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
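# Both checks are believed to accept "error", "warn" or "none"; for example, a
# locked-signature build might relax the task signature check:
#SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"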
115
116# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
117# not sign)
118SSTATE_SIG_KEY ?= ""
119SSTATE_SIG_PASSPHRASE ?= ""
120# Whether to verify the GnuPG signatures when extracting sstate archives
121SSTATE_VERIFY_SIG ?= "0"
122# List of signatures to consider valid.
123SSTATE_VALID_SIGS ??= ""
124SSTATE_VALID_SIGS[vardepvalue] = ""
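# A minimal signing configuration might look like this (hypothetical key ID
# and passphrase):
#SSTATE_SIG_KEY = "0x12345678"
#SSTATE_SIG_PASSPHRASE = "secret"
#SSTATE_VERIFY_SIG = "1"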
125
126SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
127SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
128 the output hash for a task, which in turn is used to determine equivalency. \
129 "
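# A layer could in principle point this at its own callable, for example a
# subclass of oe.sstatesig.OEOuthashBasic that filters additional paths
# (hypothetical module/class name):
#SSTATE_HASHEQUIV_METHOD = "mylayer.sstatesig.MyOuthash"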
130
131SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
132SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
133 hash equivalency server, such as PN, PV, taskname, etc. This information \
134 is very useful for developers looking at task data, but may leak sensitive \
135 data if the equivalence server is public. \
136 "
137
138python () {
139 if bb.data.inherits_class('native', d):
140 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
141 elif bb.data.inherits_class('crosssdk', d):
142 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
143 elif bb.data.inherits_class('cross', d):
144 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
145 elif bb.data.inherits_class('nativesdk', d):
146 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
147 elif bb.data.inherits_class('cross-canadian', d):
148 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
149 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
150 d.setVar('SSTATE_PKGARCH', "allarch")
151 else:
152 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
153
154 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
155 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
156 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
157 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
158
159 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
160 d.setVar('SSTATETASKS', " ".join(unique_tasks))
161 for task in unique_tasks:
162 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
163 # Generally sstate should be last, except for buildhistory functions
164 postfuncs = (d.getVarFlag(task, 'postfuncs') or "").split()
165 newpostfuncs = [p for p in postfuncs if "buildhistory" not in p] + ["sstate_task_postfunc"] + [p for p in postfuncs if "buildhistory" in p]
166 d.setVarFlag(task, 'postfuncs', " ".join(newpostfuncs))
167 d.setVarFlag(task, 'network', '1')
168 d.setVarFlag(task + "_setscene", 'network', '1')
169}
170
171def sstate_init(task, d):
172 ss = {}
173 ss['task'] = task
174 ss['dirs'] = []
175 ss['plaindirs'] = []
176 ss['lockfiles'] = []
177 ss['lockfiles-shared'] = []
178 return ss
179
180def sstate_state_fromvars(d, task = None):
181 if task is None:
182 task = d.getVar('BB_CURRENTTASK')
183 if not task:
184 bb.fatal("sstate code running without task context?!")
185 task = task.replace("_setscene", "")
186
187 if task.startswith("do_"):
188 task = task[3:]
189 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
190 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
191 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
192 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
193 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
194 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
195 if not task or len(inputs) != len(outputs):
196 bb.fatal("sstate variables not setup correctly?!")
197
198 if task == "populate_lic":
199 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
200 d.setVar("SSTATE_EXTRAPATH", "")
201 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
202
203 ss = sstate_init(task, d)
204 for i in range(len(inputs)):
205 sstate_add(ss, inputs[i], outputs[i], d)
206 ss['lockfiles'] = lockfiles
207 ss['lockfiles-shared'] = lockfilesshared
208 ss['plaindirs'] = plaindirs
209 ss['fixmedir'] = fixmedir
210 return ss
211
212def sstate_add(ss, source, dest, d):
213 if not source.endswith("/"):
214 source = source + "/"
215 if not dest.endswith("/"):
216 dest = dest + "/"
217 source = os.path.normpath(source)
218 dest = os.path.normpath(dest)
219 srcbase = os.path.basename(source)
220 ss['dirs'].append([srcbase, source, dest])
221 return ss
222
223def sstate_install(ss, d):
224 import oe.path
225 import oe.sstatesig
226 import subprocess
227
228 def prepdir(dir):
229 # remove dir if it exists, ensure any parent directories do exist
230 if os.path.exists(dir):
231 oe.path.remove(dir)
232 bb.utils.mkdirhier(dir)
233 oe.path.remove(dir)
234
235 sstateinst = d.getVar("SSTATE_INSTDIR")
236
237 for state in ss['dirs']:
238 prepdir(state[1])
239 bb.utils.rename(sstateinst + state[0], state[1])
240
241 sharedfiles = []
242 shareddirs = []
243 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
244
245 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
246
247 if os.access(manifest, os.R_OK):
248 bb.fatal("Package already staged (%s)?!" % manifest)
249
250 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
251
252 locks = []
253 for lock in ss['lockfiles-shared']:
254 locks.append(bb.utils.lockfile(lock, True))
255 for lock in ss['lockfiles']:
256 locks.append(bb.utils.lockfile(lock))
257
258 for state in ss['dirs']:
259 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
260 for walkroot, dirs, files in os.walk(state[1]):
261 for file in files:
262 srcpath = os.path.join(walkroot, file)
263 dstpath = srcpath.replace(state[1], state[2])
264 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
265 sharedfiles.append(dstpath)
266 for dir in dirs:
267 srcdir = os.path.join(walkroot, dir)
268 dstdir = srcdir.replace(state[1], state[2])
269 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
270 if os.path.islink(srcdir):
271 sharedfiles.append(dstdir)
272 continue
273 if not dstdir.endswith("/"):
274 dstdir = dstdir + "/"
275 shareddirs.append(dstdir)
276
277 # Check the file list for conflicts against files which already exist
278 overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
279 match = []
280 for f in sharedfiles:
281 if os.path.exists(f):
282 f = os.path.normpath(f)
283 realmatch = True
284 for w in overlap_allowed:
285 w = os.path.normpath(w)
286 if f.startswith(w):
287 realmatch = False
288 break
289 if realmatch:
290 match.append(f)
291 sstate_search_cmd = "grep -rlF '%s' %s --exclude=index-* | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
292 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
293 if search_output:
294 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
295 else:
296 match.append(" (not matched to any task)")
297 if match:
298 bb.fatal("Recipe %s is trying to install files into a shared " \
299 "area when those files already exist. The files and the manifests listing " \
300 "them are:\n %s\n"
301 "Please adjust the recipes so only one recipe provides a given file. " % \
302 (d.getVar('PN'), "\n ".join(match)))
303
304 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
305 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
306 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
307
308 # Write out the manifest
309 with open(manifest, "w") as f:
310 for file in sharedfiles:
311 f.write(file + "\n")
312
313 # We want to ensure that directories appear at the end of the manifest
314 # so that when we test to see if they should be deleted any contents
315 # added by the task will have been removed first.
316 dirs = sorted(shareddirs, key=len)
317 # Must remove children first, which will have a longer path than the parent
318 for di in reversed(dirs):
319 f.write(di + "\n")
320
321 # Append to the list of manifests for this PACKAGE_ARCH
322
323 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
324 l = bb.utils.lockfile(i + ".lock")
325 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
326 manifests = []
327 if os.path.exists(i):
328 with open(i, "r") as f:
329 manifests = f.readlines()
330 # We append new entries, we don't remove older entries which may have the same
331 # manifest name but different versions from stamp/workdir. See below.
332 if filedata not in manifests:
333 with open(i, "a+") as f:
334 f.write(filedata)
335 bb.utils.unlockfile(l)
336
337 # Run the actual file install
338 for state in ss['dirs']:
339 if os.path.exists(state[1]):
340 oe.path.copyhardlinktree(state[1], state[2])
341
342 for plain in ss['plaindirs']:
343 workdir = d.getVar('WORKDIR')
344 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
345 src = sstateinst + "/" + plain.replace(workdir, '')
346 if sharedworkdir in plain:
347 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
348 dest = plain
349 bb.utils.mkdirhier(src)
350 prepdir(dest)
351 bb.utils.rename(src, dest)
352
353 for lock in locks:
354 bb.utils.unlockfile(lock)
355
356sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX STAMP"
357
358def sstate_installpkg(ss, d):
359 from oe.gpg_sign import get_signer
360
361 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
362 d.setVar("SSTATE_CURRTASK", ss['task'])
363 sstatefetch = d.getVar('SSTATE_PKGNAME')
364 sstatepkg = d.getVar('SSTATE_PKG')
365 verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)
366
367 if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
368 pstaging_fetch(sstatefetch, d)
369
370 if not os.path.isfile(sstatepkg):
371 bb.note("Sstate package %s does not exist" % sstatepkg)
372 return False
373
374 sstate_clean(ss, d)
375
376 d.setVar('SSTATE_INSTDIR', sstateinst)
377
378 if verify_sig:
379 if not os.path.isfile(sstatepkg + '.sig'):
380 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
381 return False
382 signer = get_signer(d, 'local')
383 if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
384 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
385 return False
386
387 # Empty the sstateinst directory to ensure it's clean
388 if os.path.exists(sstateinst):
389 oe.path.remove(sstateinst)
390 bb.utils.mkdirhier(sstateinst)
391
392 sstateinst = d.getVar("SSTATE_INSTDIR")
393 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
394
395 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
396 # All hooks should run in the SSTATE_INSTDIR
397 bb.build.exec_func(f, d, (sstateinst,))
398
399 return sstate_installpkgdir(ss, d)
400
401def sstate_installpkgdir(ss, d):
402 import oe.path
403 import subprocess
404
405 sstateinst = d.getVar("SSTATE_INSTDIR")
406 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
407
408 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
409 # All hooks should run in the SSTATE_INSTDIR
410 bb.build.exec_func(f, d, (sstateinst,))
411
412 sstate_install(ss, d)
413
414 return True
415
416python sstate_hardcode_path_unpack () {
417 # Fixup hardcoded paths
418 #
419 # Note: The logic below must match the reverse logic in
420 # sstate_hardcode_path(d)
421 import subprocess
422
423 sstateinst = d.getVar('SSTATE_INSTDIR')
424 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
425 fixmefn = sstateinst + "fixmepath"
426 if os.path.isfile(fixmefn):
427 staging_target = d.getVar('RECIPE_SYSROOT')
428 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
429
430 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
431 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
432 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
433 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
434 else:
435 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
436
437 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
438 for fixmevar in extra_staging_fixmes.split():
439 fixme_path = d.getVar(fixmevar)
440 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
441
442 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
443 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
444
445 # Defer do_populate_sysroot relocation command
446 if sstatefixmedir:
447 bb.utils.mkdirhier(sstatefixmedir)
448 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
449 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
450 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
451 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
452 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
453 f.write(sstate_hardcode_cmd)
454 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
455 return
456
457 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
458 subprocess.check_call(sstate_hardcode_cmd, shell=True)
459
460 # Need to remove this or we'd copy it into the target directory, where it may
461 # conflict with another writer
462 os.remove(fixmefn)
463}
464
465def sstate_clean_cachefile(ss, d):
466 import oe.path
467
468 if d.getVarFlag('do_%s' % ss['task'], 'task'):
469 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
470 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
471 bb.note("Removing %s" % sstatepkgfile)
472 oe.path.remove(sstatepkgfile)
473
474def sstate_clean_cachefiles(d):
475 for task in (d.getVar('SSTATETASKS') or "").split():
476 ld = d.createCopy()
477 ss = sstate_state_fromvars(ld, task)
478 sstate_clean_cachefile(ss, ld)
479
480def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
481 import oe.path
482
483 with open(manifest) as mfile:
484 entries = mfile.readlines()
485
486 for entry in entries:
487 entry = entry.strip()
488 if prefix and not entry.startswith("/"):
489 entry = prefix + "/" + entry
490 bb.debug(2, "Removing manifest: %s" % entry)
491 # We can race against another package populating directories as we're removing them
492 # so we ignore errors here.
493 try:
494 if entry.endswith("/"):
495 if os.path.islink(entry[:-1]):
496 os.remove(entry[:-1])
497 elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
498 # Removing directories whilst builds are in progress exposes a race. Only
499 # do it in contexts where it is safe to do so.
500 os.rmdir(entry[:-1])
501 else:
502 os.remove(entry)
503 except OSError:
504 pass
505
506 postrm = manifest + ".postrm"
507 if os.path.exists(manifest + ".postrm"):
508 import subprocess
509 os.chmod(postrm, 0o755)
510 subprocess.check_call(postrm, shell=True)
511 oe.path.remove(postrm)
512
513 oe.path.remove(manifest)
514
515def sstate_clean(ss, d):
516 import oe.path
517 import glob
518
519 d2 = d.createCopy()
520 stamp_clean = d.getVar("STAMPCLEAN")
521 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
522 if extrainf:
523 d2.setVar("SSTATE_MANMACH", extrainf)
524 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
525 else:
526 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
527
528 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
529
530 if os.path.exists(manifest):
531 locks = []
532 for lock in ss['lockfiles-shared']:
533 locks.append(bb.utils.lockfile(lock))
534 for lock in ss['lockfiles']:
535 locks.append(bb.utils.lockfile(lock))
536
537 sstate_clean_manifest(manifest, d, canrace=True)
538
539 for lock in locks:
540 bb.utils.unlockfile(lock)
541
542 # Remove the current and previous stamps, but keep the sigdata.
543 #
544 # The glob() matches do_task* which may match multiple tasks, for
545 # example: do_package and do_package_write_ipk, so we need to
546 # exactly match *.do_task.* and *.do_task_setscene.*
547 rm_stamp = '.do_%s.' % ss['task']
548 rm_setscene = '.do_%s_setscene.' % ss['task']
549 # For BB_SIGNATURE_HANDLER = "noop"
550 rm_nohash = ".do_%s" % ss['task']
551 for stfile in glob.glob(wildcard_stfile):
552 # Keep the sigdata
553 if ".sigdata." in stfile or ".sigbasedata." in stfile:
554 continue
555 # Preserve taint files in the stamps directory
556 if stfile.endswith('.taint'):
557 continue
558 if rm_stamp in stfile or rm_setscene in stfile or \
559 stfile.endswith(rm_nohash):
560 oe.path.remove(stfile)
561
562sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
563
564CLEANFUNCS += "sstate_cleanall"
565
566python sstate_cleanall() {
567 bb.note("Removing shared state for package %s" % d.getVar('PN'))
568
569 manifest_dir = d.getVar('SSTATE_MANIFESTS')
570 if not os.path.exists(manifest_dir):
571 return
572
573 tasks = d.getVar('SSTATETASKS').split()
574 for name in tasks:
575 ld = d.createCopy()
576 shared_state = sstate_state_fromvars(ld, name)
577 sstate_clean(shared_state, ld)
578}
579
580python sstate_hardcode_path () {
581 import subprocess, platform
582
583 # Need to remove hardcoded paths and fix these when we install the
584 # staging packages.
585 #
586 # Note: the logic in this function needs to match the reverse logic
587 # in sstate_installpkg(ss, d)
588
589 staging_target = d.getVar('RECIPE_SYSROOT')
590 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
591 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
592
593 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
594 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
595 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
596 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
597 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
598 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
599 else:
600 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
601 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
602
603 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
604 for fixmevar in extra_staging_fixmes.split():
605 fixme_path = d.getVar(fixmevar)
606 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
607 sstate_grep_cmd += " -e '%s'" % (fixme_path)
608
609 fixmefn = sstate_builddir + "fixmepath"
610
611 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
612 sstate_filelist_cmd = "tee %s" % (fixmefn)
613
614 # fixmepath file needs relative paths, drop sstate_builddir prefix
615 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
616
617 xargs_no_empty_run_cmd = '--no-run-if-empty'
618 if platform.system() == 'Darwin':
619 xargs_no_empty_run_cmd = ''
620
621 # Limit the fixpaths and sed operations based on the initial grep search
622 # This has the side effect of making sure the vfs cache is hot
623 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
624
625 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
626 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
627
628 # If the fixmefn is empty, remove it..
629 # If the fixmefn is empty, remove it.
630 os.remove(fixmefn)
631 else:
632 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
633 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
634}
635
636def sstate_package(ss, d):
637 import oe.path
638 import time
639
640 tmpdir = d.getVar('TMPDIR')
641
642 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
643 sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
644 d.setVar("SSTATE_CURRTASK", ss['task'])
645 bb.utils.remove(sstatebuild, recurse=True)
646 bb.utils.mkdirhier(sstatebuild)
647 exit = False
648 for state in ss['dirs']:
649 if not os.path.exists(state[1]):
650 continue
651 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
652 # Find absolute symlinks and error on them. We could attempt to relocate them, but it's not
653 # clear what the symlink should be relative to in this context. We could add that markup
654 # to sstate tasks but there aren't many of these so better just avoid them entirely.
655 for walkroot, dirs, files in os.walk(state[1]):
656 for file in files + dirs:
657 srcpath = os.path.join(walkroot, file)
658 if not os.path.islink(srcpath):
659 continue
660 link = os.readlink(srcpath)
661 if not os.path.isabs(link):
662 continue
663 if not link.startswith(tmpdir):
664 continue
665 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
666 exit = True
667 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
668 bb.utils.rename(state[1], sstatebuild + state[0])
669 if exit:
670 bb.fatal("Failing task due to absolute path symlinks")
671
672 workdir = d.getVar('WORKDIR')
673 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
674 for plain in ss['plaindirs']:
675 pdir = plain.replace(workdir, sstatebuild)
676 if sharedworkdir in plain:
677 pdir = plain.replace(sharedworkdir, sstatebuild)
678 bb.utils.mkdirhier(plain)
679 bb.utils.mkdirhier(pdir)
680 bb.utils.rename(plain, pdir)
681
682 d.setVar('SSTATE_BUILDDIR', sstatebuild)
683 d.setVar('SSTATE_INSTDIR', sstatebuild)
684
685 if d.getVar('SSTATE_SKIP_CREATION') == '1':
686 return
687
688 sstate_create_package = ['sstate_report_unihash', 'sstate_create_and_sign_package']
689
690 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
691 sstate_create_package + \
692 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
693 # All hooks should run in SSTATE_BUILDDIR.
694 bb.build.exec_func(f, d, (sstatebuild,))
695
696 # SSTATE_PKG may have been changed by sstate_report_unihash
697 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
698 if not os.path.exists(siginfo):
699 bb.siggen.dump_this_task(siginfo, d)
700 else:
701 try:
702 os.utime(siginfo, None)
703 except PermissionError:
704 pass
705 except OSError as e:
706 # Handle read-only file systems gracefully
707 import errno
708 if e.errno != errno.EROFS:
709 raise e
710
711 return
712
713sstate_package[vardepsexclude] += "SSTATE_SIG_KEY SSTATE_PKG"
714
715def pstaging_fetch(sstatefetch, d):
716 import bb.fetch2
717
718 # Only try and fetch if the user has configured a mirror
719 mirrors = d.getVar('SSTATE_MIRRORS')
720 if not mirrors:
721 return
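    # SSTATE_MIRRORS uses the standard mirror syntax, for example (hypothetical
    # server):
    #   SSTATE_MIRRORS = "file://.* http://example.com/sstate/PATH;downloadfilename=PATH"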
722
723 # Copy the data object and override DL_DIR and SRC_URI
724 localdata = bb.data.createCopy(d)
725
726 dldir = localdata.expand("${SSTATE_DIR}")
727
728 localdata.delVar('MIRRORS')
729 localdata.setVar('FILESPATH', dldir)
730 localdata.setVar('DL_DIR', dldir)
731 localdata.setVar('PREMIRRORS', mirrors)
732
733 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
734 # we'll want to allow network access for the current set of fetches.
735 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
736 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
737 localdata.delVar('BB_NO_NETWORK')
738
739 # Try a fetch from the sstate mirror, if it fails just return and
740 # we will build the package
741 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
742 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
743 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
744 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
745
746 with bb.utils.umask(bb.utils.to_filemode(d.getVar("OE_SHARED_UMASK"))):
747 bb.utils.mkdirhier(dldir)
748
749 for srcuri in uris:
750 localdata.delVar('SRC_URI')
751 localdata.setVar('SRC_URI', srcuri)
752 try:
753 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
754 fetcher.checkstatus()
755 fetcher.download()
756
757 except bb.fetch2.BBFetchException:
758 pass
759
760def sstate_setscene(d):
761 shared_state = sstate_state_fromvars(d)
762 accelerate = sstate_installpkg(shared_state, d)
763 if not accelerate:
764 msg = "No sstate archive obtainable, will run full task instead."
765 bb.warn(msg)
766 raise bb.BBHandledException(msg)
767
768python sstate_task_prefunc () {
769 shared_state = sstate_state_fromvars(d)
770 sstate_clean(shared_state, d)
771}
772sstate_task_prefunc[dirs] = "${WORKDIR}"
773
774python sstate_task_postfunc () {
775 shared_state = sstate_state_fromvars(d)
776
777 shared_umask = bb.utils.to_filemode(d.getVar("OE_SHARED_UMASK"))
778 omask = os.umask(shared_umask)
779 if omask != shared_umask:
780 bb.note("Using umask %0o (not %0o) for sstate packaging" % (shared_umask, omask))
781 sstate_package(shared_state, d)
782 os.umask(omask)
783
784 sstateinst = d.getVar("SSTATE_INSTDIR")
785 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
786
787 sstate_installpkgdir(shared_state, d)
788
789 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
790}
791sstate_task_postfunc[dirs] = "${WORKDIR}"
792
793# Create a sstate package
794# If enabled, sign the package.
795# Package and signature are created in a sub-directory
796# and renamed in place once created.
797python sstate_create_and_sign_package () {
798 from pathlib import Path
799
800 # Best effort touch
801 def touch(file):
802 try:
803 file.touch()
804 except:
805 pass
806
807 def update_file(src, dst, force=False):
808 if dst.is_symlink() and not dst.exists():
809 force=True
810 try:
811 # This relies on src being a temporary file that can be renamed
812 # or left as is.
813 if force:
814 src.rename(dst)
815 else:
816 os.link(src, dst)
817 return True
818 except:
819 pass
820
821 if dst.exists():
822 touch(dst)
823
824 return False
825
826 sign_pkg = (
827 bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG")) and
828 bool(d.getVar("SSTATE_SIG_KEY"))
829 )
830
831 sstate_pkg = Path(d.getVar("SSTATE_PKG"))
832 sstate_pkg_sig = Path(str(sstate_pkg) + ".sig")
833 if sign_pkg:
834 if sstate_pkg.exists() and sstate_pkg_sig.exists():
835 touch(sstate_pkg)
836 touch(sstate_pkg_sig)
837 return
838 else:
839 if sstate_pkg.exists():
840 touch(sstate_pkg)
841 return
842
843 # Create the required sstate directory if it is not present.
844 if not sstate_pkg.parent.is_dir():
845 shared_umask = bb.utils.to_filemode(d.getVar("OE_SHARED_UMASK"))
846 with bb.utils.umask(shared_umask):
847 bb.utils.mkdirhier(str(sstate_pkg.parent))
848
849 if sign_pkg:
850 from tempfile import TemporaryDirectory
851 with TemporaryDirectory(dir=sstate_pkg.parent) as tmp_dir:
852 tmp_pkg = Path(tmp_dir) / sstate_pkg.name
853 sstate_archive_package(tmp_pkg, d)
854
855 from oe.gpg_sign import get_signer
856 signer = get_signer(d, 'local')
857 signer.detach_sign(str(tmp_pkg), d.getVar('SSTATE_SIG_KEY'), None,
858 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
859
860 tmp_pkg_sig = Path(tmp_dir) / sstate_pkg_sig.name
861 if not update_file(tmp_pkg_sig, sstate_pkg_sig):
862 # If the created signature file could not be copied into place,
863 # then we should not use the sstate package either.
864 return
865
866 # If the .sig file was updated, then the sstate package must also
867 # be updated.
868 update_file(tmp_pkg, sstate_pkg, force=True)
869 else:
870 from tempfile import NamedTemporaryFile
871 with NamedTemporaryFile(prefix=sstate_pkg.name, dir=sstate_pkg.parent) as tmp_pkg_fd:
872 tmp_pkg = tmp_pkg_fd.name
873 sstate_archive_package(tmp_pkg, d)
874 update_file(tmp_pkg, sstate_pkg)
875 # update_file() may have renamed tmp_pkg, which must exist when the
876 # NamedTemporaryFile() context handler ends.
877 touch(Path(tmp_pkg))
878
879}
880
881# Function to generate a sstate package from the current directory.
882# The calling function handles moving the sstate package into the final
883# destination.
884def sstate_archive_package(sstate_pkg, d):
885 import subprocess
886
887 cmd = [
888 "tar",
889 "-I", d.expand("pzstd -${SSTATE_ZSTD_CLEVEL} -p${ZSTD_THREADS}"),
890 "-cS",
891 "-f", sstate_pkg,
892 ]
893
894 # tar refuses to create an empty archive unless told explicitly
895 files = sorted(os.listdir("."))
896 if not files:
897 files = ["--files-from=/dev/null"]
898
899 try:
900 subprocess.run(cmd + files, check=True)
901 except subprocess.CalledProcessError as e:
902 # Ignore error 1 as this is caused by files changing
903 # (link count increasing from hardlinks being created).
904 if e.returncode != 1:
905 raise
906
907 os.chmod(sstate_pkg, 0o664)
908
909
910python sstate_report_unihash() {
911 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
912
913 if report_unihash:
914 ss = sstate_state_fromvars(d)
915 report_unihash(os.getcwd(), ss['task'], d)
916}
917
918#
919# Shell function to decompress and prepare a package for installation
920# Will be run from within SSTATE_INSTDIR.
921#
922sstate_unpack_package () {
923 ZSTD="zstd -T${ZSTD_THREADS}"
924 # Use pzstd if available
925 if [ -x "$(command -v pzstd)" ]; then
926 ZSTD="pzstd -p ${ZSTD_THREADS}"
927 fi
928
929 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
930 # update .siginfo atime on local/NFS mirror if it is a symbolic link
931 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
932 # update each symbolic link instead of any referenced file
933 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
934 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
935 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
936}
937
938BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
939
940def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
941 import itertools
942
943 found = set()
944 missed = set()
945
946 def gethash(task):
947 return sq_data['unihash'][task]
948
949 def getpathcomponents(task, d):
950 # Magic data from BB_HASHFILENAME
951 splithashfn = sq_data['hashfn'][task].split(" ")
952 spec = splithashfn[1]
953 if splithashfn[0] == "True":
954 extrapath = d.getVar("NATIVELSBSTRING") + "/"
955 else:
956 extrapath = ""
957
958 tname = bb.runqueue.taskname_from_tid(task)[3:]
959
960 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
961 spec = splithashfn[2]
962 extrapath = ""
963
964 return spec, extrapath, tname
965
966 def getsstatefile(tid, siginfo, d):
967 spec, extrapath, tname = getpathcomponents(tid, d)
968 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
969
970 for tid in sq_data['hash']:
971
972 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
973
974 if os.path.exists(sstatefile):
975 oe.utils.touch(sstatefile)
976 found.add(tid)
977 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
978 else:
979 missed.add(tid)
980 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
981
982 foundLocal = len(found)
983 mirrors = d.getVar("SSTATE_MIRRORS")
984 if mirrors:
985 # Copy the data object and override DL_DIR and SRC_URI
986 localdata = bb.data.createCopy(d)
987
988 dldir = localdata.expand("${SSTATE_DIR}")
989 localdata.delVar('MIRRORS')
990 localdata.setVar('FILESPATH', dldir)
991 localdata.setVar('DL_DIR', dldir)
992 localdata.setVar('PREMIRRORS', mirrors)
993
994 bb.debug(2, "SState using premirror of: %s" % mirrors)
995
996 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
997 # we'll want to allow network access for the current set of fetches.
998 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
999 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
1000 localdata.delVar('BB_NO_NETWORK')
1001
1002 from bb.fetch2 import FetchConnectionCache
1003 def checkstatus_init():
1004 while not connection_cache_pool.full():
1005 connection_cache_pool.put(FetchConnectionCache())
1006
1007 def checkstatus_end():
1008 while not connection_cache_pool.empty():
1009 connection_cache = connection_cache_pool.get()
1010 connection_cache.close_connections()
1011
1012 def checkstatus(arg):
1013 (tid, sstatefile) = arg
1014
1015 connection_cache = connection_cache_pool.get()
1016 localdata2 = bb.data.createCopy(localdata)
1017 srcuri = "file://" + sstatefile
1018 localdata2.setVar('SRC_URI', srcuri)
1019 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
1020
1021 import traceback
1022
1023 try:
1024 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
1025 connection_cache=connection_cache)
1026 fetcher.checkstatus()
1027 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1028 found.add(tid)
1029 missed.remove(tid)
1030 except bb.fetch2.FetchError as e:
1031 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1032 except Exception as e:
1033 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1034
1035 connection_cache_pool.put(connection_cache)
1036
1037 if progress:
1038 bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
1039 bb.event.check_for_interrupts()
1040
1041 tasklist = []
1042 for tid in missed:
1043 sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1044 tasklist.append((tid, sstatefile))
1045
1046 if tasklist:
1047 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1048
1049 ## thread-safe counter
1050 cnt_tasks_done = itertools.count(start = 1)
1051 progress = len(tasklist) >= 100
1052 if progress:
1053 msg = "Checking sstate mirror object availability"
1054 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1055
1056 # Have to setup the fetcher environment here rather than in each thread as it would race
1057 fetcherenv = bb.fetch2.get_fetcher_environment(d)
1058 with bb.utils.environment(**fetcherenv):
1059 bb.event.enable_threadlock()
1060 import concurrent.futures
1061 from queue import Queue
1062 connection_cache_pool = Queue(nproc)
1063 checkstatus_init()
1064 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
1065 executor.map(checkstatus, tasklist.copy())
1066 checkstatus_end()
1067 bb.event.disable_threadlock()
1068
1069 if progress:
1070 bb.event.fire(bb.event.ProcessFinished(msg), d)
1071
1072 inheritlist = d.getVar("INHERIT")
1073 if "toaster" in inheritlist:
1074 evdata = {'missed': [], 'found': []};
1075 for tid in missed:
1076 sstatefile = d.expand(getsstatefile(tid, False, d))
1077 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1078 for tid in found:
1079 sstatefile = d.expand(getsstatefile(tid, False, d))
1080 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1081 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1082
1083 if summary:
1084 # Print some summary statistics about the current task completion and how much sstate
1085 # reuse there was. Avoid divide by zero errors.
1086 total = len(sq_data['hash'])
1087 complete = 0
1088 if currentcount:
1089 complete = (len(found) + currentcount) / (total + currentcount) * 100
1090 match = 0
1091 if total:
1092 match = len(found) / total * 100
1093 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1094 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1095
1096 if hasattr(bb.parse.siggen, "checkhashes"):
1097 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1098
1099 return found
1100setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT _SSTATE_EXCLUDEDEPS_SYSROOT"
1101
1102BB_SETSCENE_DEPVALID = "setscene_depvalid"
1103
1104def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1105 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1106 # task is included in taskdependees too
1107 # Return - False - We need this dependency
1108 # - True - We can skip this dependency
1109 import re
1110
1111 def logit(msg, log):
1112 if log is not None:
1113 log.append(msg)
1114 else:
1115 bb.debug(2, msg)
1116
1117 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1118
1119 directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]
1120
1121 def isNativeCross(x):
1122 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1123
1124 # We only need to trigger deploy_source_date_epoch through direct dependencies
1125 if taskdependees[task][1] in directtasks:
1126 return True
1127
1128 # We only need to trigger packagedata through direct dependencies
1129 # but need to preserve packagedata on packagedata links
1130 if taskdependees[task][1] == "do_packagedata":
1131 for dep in taskdependees:
1132 if taskdependees[dep][1] == "do_packagedata":
1133 return False
1134 return True
1135
1136 for dep in taskdependees:
1137 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1138 if task == dep:
1139 continue
1140 if dep in notneeded:
1141 continue
1142 # do_package_write_* and do_package don't need do_package
1143 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1144 continue
1145 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1146 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1147 return False
1148 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1149 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1150 continue
1151 # Native/Cross packages don't exist and are noexec anyway
1152 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1153 continue
1154
1155 # Consider sysroot depending on sysroot tasks
1156 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1157 # Allow excluding certain recursive dependencies. If a recipe needs it should add a
1158 # specific dependency itself, rather than relying on one of its dependees to pull
1159 # them in.
1160 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
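            # Entries take the form "dependent->dependency" (a pair of regular
            # expressions); e.g. a pattern like ".*->autoconf-archive" stops
            # autoconf-archive's sysroot being pulled in by everything that
            # happens to depend on it.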
1161 not_needed = False
1162 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1163 if excludedeps is None:
1164 # Cache the regular expressions for speed
1165 excludedeps = []
1166 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1167 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1168 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1169 for excl in excludedeps:
1170 if excl[0].match(taskdependees[dep][0]):
1171 if excl[1].match(taskdependees[task][0]):
1172 not_needed = True
1173 break
1174 if not_needed:
1175 continue
1176 # For meta-extsdk-toolchain we want all sysroot dependencies
1177 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1178 return False
1179 # Native/Cross populate_sysroot need their dependencies
1180 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1181 return False
1182 # Target populate_sysroot depended on by cross tools need to be installed
1183 if isNativeCross(taskdependees[dep][0]):
1184 return False
1185 # Native/cross tools depended upon by target sysroot are not needed
1186 # Add an exception for shadow-native as required by useradd.bbclass
1187 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1188 continue
1189 # Target populate_sysroot need their dependencies
1190 return False
1191
1192 if taskdependees[dep][1] in directtasks:
1193 continue
1194
1195 # Safe fallthrough default
1196 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1197 return False
1198 return True
1199
1200addhandler sstate_eventhandler
1201sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1202python sstate_eventhandler() {
1203 d = e.data
1204 writtensstate = d.getVar('SSTATE_CURRTASK')
1205 if not writtensstate:
1206 taskname = d.getVar("BB_RUNTASK")[3:]
1207 spec = d.getVar('SSTATE_PKGSPEC')
1208 swspec = d.getVar('SSTATE_SWSPEC')
1209 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1210 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1211 d.setVar("SSTATE_EXTRAPATH", "")
1212 d.setVar("SSTATE_CURRTASK", taskname)
1213 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1214 if not os.path.exists(siginfo):
1215 bb.siggen.dump_this_task(siginfo, d)
1216 else:
1217 oe.utils.touch(siginfo)
1218}
1219
1220SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1221
1222#
1223# Event handler which removes manifests and stamp files for recipes which are no
1224# longer 'reachable' in a build where they once were. 'Reachable' refers to
1225# whether a recipe is parsed so recipes in a layer which was removed would no
1226# longer be reachable. Switching between systemd and sysvinit where recipes
1227# became skipped would be another example.
1228#
1229# Also optionally removes the workdir of those tasks/recipes
1230#
1231addhandler sstate_eventhandler_reachablestamps
1232sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1233python sstate_eventhandler_reachablestamps() {
1234 import glob
1235 d = e.data
1236 stamps = e.stamps.values()
1237 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1238 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1239 preservestamps = []
1240 if os.path.exists(preservestampfile):
1241 with open(preservestampfile, 'r') as f:
1242 preservestamps = f.readlines()
1243 seen = []
1244
1245 # The machine index contains all the stamps this machine has ever seen in this build directory.
1246 # We should only remove things which this machine once accessed but no longer does.
1247 machineindex = set()
1248 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1249 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1250 if os.path.exists(mi):
1251 with open(mi, "r") as f:
1252 machineindex = set(line.strip() for line in f.readlines())
1253
1254 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1255 toremove = []
1256 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1257 if not os.path.exists(i):
1258 continue
1259 manseen = set()
1260 ignore = []
1261 with open(i, "r") as f:
1262 lines = f.readlines()
1263 for l in reversed(lines):
1264 try:
1265 (stamp, manifest, workdir) = l.split()
1266 # The index may have multiple entries for the same manifest as the code above only appends
1267 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1268 # The last entry in the list is the valid one, any earlier entries with matching manifests
1269 # should be ignored.
1270 if manifest in manseen:
1271 ignore.append(l)
1272 continue
1273 manseen.add(manifest)
1274 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1275 toremove.append(l)
1276 if stamp not in seen:
1277 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1278 seen.append(stamp)
1279 except ValueError:
1280 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1281
1282 if toremove:
1283 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1284 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1285
1286 removed = 0
1287 for r in toremove:
1288 (stamp, manifest, workdir) = r.split()
1289 for m in glob.glob(manifest + ".*"):
1290 if m.endswith(".postrm"):
1291 continue
1292 sstate_clean_manifest(m, d)
1293 bb.utils.remove(stamp + "*")
1294 if removeworkdir:
1295 bb.utils.remove(workdir, recurse = True)
1296 lines.remove(r)
1297 removed = removed + 1
1298 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1299 bb.event.check_for_interrupts()
1300
1301 bb.event.fire(bb.event.ProcessFinished(msg), d)
1302
1303 with open(i, "w") as f:
1304 for l in lines:
1305 if l in ignore:
1306 continue
1307 f.write(l)
1308 machineindex |= set(stamps)
1309 with open(mi, "w") as f:
1310 for l in machineindex:
1311 f.write(l + "\n")
1312
1313 if preservestamps:
1314 os.remove(preservestampfile)
1315}
1316
1317
1318#
1319# Bitbake can generate an event showing which setscene tasks are 'stale',
1320# i.e. which ones will be rerun. These are ones where a stamp file is present but
1321# it is stale (e.g. the taskhash doesn't match). With that list we can go through
1322# the manifests for matching tasks and "uninstall" those manifests now. We do
1323# this now rather than mid build since the distribution of files between sstate
1324# objects may have changed, new tasks may run first and if those new tasks overlap
1325# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1326# removing these files is fast.
1327#
1328addhandler sstate_eventhandler_stalesstate
1329sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1330python sstate_eventhandler_stalesstate() {
1331 d = e.data
1332 tasks = e.tasks
1333
1334 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1335
1336 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1337 toremove = []
1338 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1339 if not os.path.exists(i):
1340 continue
1341 with open(i, "r") as f:
1342 lines = f.readlines()
1343 for l in lines:
1344 try:
1345 (stamp, manifest, workdir) = l.split()
1346 for tid in tasks:
1347 for s in tasks[tid]:
1348 if s.startswith(stamp):
1349 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1350 manname = manifest + "." + taskname
1351 if os.path.exists(manname):
1352 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1353 toremove.append((manname, tid, tasks[tid]))
1354 break
1355 except ValueError:
1356 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1357
1358 if toremove:
1359 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1360 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1361
1362 removed = 0
1363 for (manname, tid, stamps) in toremove:
1364 sstate_clean_manifest(manname, d)
1365 for stamp in stamps:
1366 bb.utils.remove(stamp)
1367 removed = removed + 1
1368 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1369 bb.event.check_for_interrupts()
1370
1371 bb.event.fire(bb.event.ProcessFinished(msg), d)
1372}
diff --git a/meta/classes-global/staging.bbclass b/meta/classes-global/staging.bbclass
deleted file mode 100644
index 1008867a6c..0000000000
--- a/meta/classes-global/staging.bbclass
+++ /dev/null
@@ -1,702 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# These directories will be staged in the sysroot
8SYSROOT_DIRS = " \
9 ${includedir} \
10 ${libdir} \
11 ${base_libdir} \
12 ${nonarch_base_libdir} \
13 ${datadir} \
14 /sysroot-only \
15"
16
17# These directories are also staged in the sysroot when they contain files that
18# are usable on the build system
19SYSROOT_DIRS_NATIVE = " \
20 ${bindir} \
21 ${sbindir} \
22 ${base_bindir} \
23 ${base_sbindir} \
24 ${libexecdir} \
25 ${sysconfdir} \
26 ${localstatedir} \
27"
28SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
29SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
30SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
31
32# These directories will not be staged in the sysroot
33SYSROOT_DIRS_IGNORE = " \
34 ${mandir} \
35 ${docdir} \
36 ${infodir} \
37 ${datadir}/X11/locale \
38 ${datadir}/applications \
39 ${datadir}/bash-completion \
40 ${datadir}/fonts \
41 ${datadir}/gtk-doc/html \
42 ${datadir}/installed-tests \
43 ${datadir}/locale \
44 ${datadir}/pixmaps \
45 ${datadir}/terminfo \
46 ${libdir}/${BPN}/ptest \
47"
48
49sysroot_stage_dir() {
50 src="$1"
51 dest="$2"
52 # if the src doesn't exist, don't do anything
53 if [ ! -d "$src" ]; then
54 return
55 fi
56
57 mkdir -p "$dest"
58 rdest=$(realpath --relative-to="$src" "$dest")
59 (
60 cd $src
61 find . -print0 | cpio --null -pdlu $rdest
62 )
63}
64
65sysroot_stage_dirs() {
66 from="$1"
67 to="$2"
68
69 for dir in ${SYSROOT_DIRS}; do
70 sysroot_stage_dir "$from$dir" "$to$dir"
71 done
72
73 # Remove directories we do not care about
74 for dir in ${SYSROOT_DIRS_IGNORE}; do
75 rm -rf "$to$dir"
76 done
77}
78
79sysroot_stage_all() {
80 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
81}
82
83python sysroot_strip () {
84 inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
85 if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
86 return
87
88 dstdir = d.getVar('SYSROOT_DESTDIR')
89 pn = d.getVar('PN')
90 libdir = d.getVar("libdir")
91 base_libdir = d.getVar("base_libdir")
92 qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
93 strip_cmd = d.getVar("STRIP")
94
95 max_process = oe.utils.get_bb_number_threads(d)
96 oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process,
97 qa_already_stripped=qa_already_stripped)
98}
99
100do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
101
102addtask populate_sysroot after do_install
103
104SYSROOT_PREPROCESS_FUNCS ?= ""
105SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
106
107python do_populate_sysroot () {
108 # SYSROOT 'version' 2
109 bb.build.exec_func("sysroot_stage_all", d)
110 bb.build.exec_func("sysroot_strip", d)
111 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
112 bb.build.exec_func(f, d)
113 pn = d.getVar("PN")
114 multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
115 provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
116 bb.utils.mkdirhier(provdir)
117 for p in d.getVar("PROVIDES").split():
118 if p in multiprov:
119 continue
120 p = p.replace("/", "_")
121 with open(provdir + p, "w") as f:
122 f.write(pn)
123}
124
125do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
126do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"
127
128POPULATESYSROOTDEPS = ""
129POPULATESYSROOTDEPS:class-target = "virtual/cross-binutils:do_populate_sysroot"
130POPULATESYSROOTDEPS:class-nativesdk = "virtual/nativesdk-cross-binutils:do_populate_sysroot"
131do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
132
133SSTATETASKS += "do_populate_sysroot"
134do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
135do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
136do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
137do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
138
139python do_populate_sysroot_setscene () {
140 sstate_setscene(d)
141}
142addtask do_populate_sysroot_setscene
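# The block above is the canonical pattern for plugging a task into the
# sstate machinery: add it to SSTATETASKS, point sstate-inputdirs at the
# task's output, sstate-outputdirs (and optionally sstate-fixmedir) at the
# shared location, and provide a matching *_setscene task that calls
# sstate_setscene().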
143
144def staging_copyfile(c, target, dest, postinsts, seendirs):
145 import errno
146
147 destdir = os.path.dirname(dest)
148 if destdir not in seendirs:
149 bb.utils.mkdirhier(destdir)
150 seendirs.add(destdir)
151 if "/usr/bin/postinst-" in c:
152 postinsts.append(dest)
153 if os.path.islink(c):
154 linkto = os.readlink(c)
155 if os.path.lexists(dest):
156 if not os.path.islink(dest):
157 raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
158 if os.readlink(dest) == linkto:
159 return dest
160 raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
161 os.symlink(linkto, dest)
162 #bb.warn(c)
163 else:
164 try:
165 os.link(c, dest)
166 except OSError as err:
167 if err.errno == errno.EXDEV:
168 bb.utils.copyfile(c, dest)
169 else:
170 raise
171 return dest
172
173def staging_copydir(c, target, dest, seendirs):
174 if dest not in seendirs:
175 bb.utils.mkdirhier(dest)
176 seendirs.add(dest)
177
178def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
179 import subprocess
180
181 if not fixme:
182 return
183 cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
184 for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
185 fixme_path = d.getVar(fixmevar)
186 cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
187 bb.debug(2, cmd)
188 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
189
190
191def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
192 import glob
193 import subprocess
194 import errno
195
196 fixme = []
197 postinsts = []
198 seendirs = set()
199 stagingdir = d.getVar("STAGING_DIR")
200 if native:
201 pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
202 targetdir = nativesysroot
203 else:
204 pkgarchs = ['${MACHINE_ARCH}']
205 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
206 pkgarchs.append('allarch')
207 targetdir = targetsysroot
208
209 bb.utils.mkdirhier(targetdir)
210 for pkgarch in pkgarchs:
211 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
212 if manifest.endswith("-initial.populate_sysroot"):
213 # skip libgcc-initial due to file overlap
214 continue
215 if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
216 continue
217 if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
218 continue
219 tmanifest = targetdir + "/" + os.path.basename(manifest)
220 if os.path.exists(tmanifest):
221 continue
222 try:
223 os.link(manifest, tmanifest)
224 except OSError as err:
225 if err.errno == errno.EXDEV:
226 bb.utils.copyfile(manifest, tmanifest)
227 else:
228 raise
229 with open(manifest, "r") as f:
230 for l in f:
231 l = l.strip()
232 if l.endswith("/fixmepath"):
233 fixme.append(l)
234 continue
235 if l.endswith("/fixmepath.cmd"):
236 continue
237 dest = l.replace(stagingdir, "")
238 dest = targetdir + "/" + "/".join(dest.split("/")[3:])
239 if l.endswith("/"):
240 staging_copydir(l, targetdir, dest, seendirs)
241 continue
242 try:
243 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
244 except FileExistsError:
245 continue
246
247 staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
248 for p in sorted(postinsts):
249 bb.note("Running postinst {}, output:\n{}".format(p, subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)))
250
251#
252# Manifests here are complicated. The main sysroot area has the unpacked sstate
253# which is unrelocated and tracked by the main sstate manifests. Each recipe
254# specific sysroot has manifests for each dependency that is installed there.
255# The task hash is used to tell whether the data needs to be reinstalled. We
256# use a symlink to point to the currently installed hash. There is also a
257# "complete" stamp file which is used to mark if installation completed. If
258# something fails (e.g. a postinst), this won't get written and we would
259# remove and reinstall the dependency. This also means partially installed
260# dependencies should get cleaned up correctly.
261#
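# Schematically, each installed dependency in a recipe-specific sysroot is
# tracked as (names illustrative, not literal):
#   <manifest-dir>/<dep> -> <dep>.<taskhash>   (symlink to the installed hash)
#   <manifest-dir>/<dep>.complete              (written only on success)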
262
263python extend_recipe_sysroot() {
264 import copy
265 import subprocess
266 import errno
267 import collections
268 import glob
269
270 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
271 mytaskname = d.getVar("BB_RUNTASK")
272 if mytaskname.endswith("_setscene"):
273 mytaskname = mytaskname.replace("_setscene", "")
274 workdir = d.getVar("WORKDIR")
275 #bb.warn(str(taskdepdata))
276 pn = d.getVar("PN")
277 stagingdir = d.getVar("STAGING_DIR")
278 sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
279 # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
280 manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
281 if manifestprefix:
282 sharedmanifests = sharedmanifests + "/" + manifestprefix
283 recipesysroot = d.getVar("RECIPE_SYSROOT")
284 recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
285
286 # Detect bitbake -b usage
287 nodeps = d.getVar("BB_LIMITEDDEPS") or False
288 if nodeps:
289 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
290 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
291 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
292 bb.utils.unlockfile(lock)
293 return
294
295 start = None
296 configuredeps = []
297 owntaskdeps = []
298 for dep in taskdepdata:
299 data = taskdepdata[dep]
300 if data[1] == mytaskname and data[0] == pn:
301 start = dep
302 elif data[0] == pn:
303 owntaskdeps.append(data[1])
304 if start is None:
305 bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
306
307 # We need to figure out which sysroot files we need to expose to this task.
308 # This needs to match what would get restored from sstate, which is controlled
309 # ultimately by calls from bitbake to setscene_depvalid().
310 # That function expects a setscene dependency tree. We build a dependency tree
311 # condensed to inter-sstate task dependencies, similar to that used by setscene
312 # tasks. We can then call into setscene_depvalid() and decide
313 # which dependencies we can "see" and should expose in the recipe specific sysroot.
314 setscenedeps = copy.deepcopy(taskdepdata)
315
316 start = set([start])
317
318 sstatetasks = d.getVar("SSTATETASKS").split()
319 # Add recipe specific tasks referenced by setscene_depvalid()
320 sstatetasks.append("do_stash_locale")
321 sstatetasks.append("do_deploy")
322
323 def print_dep_tree(deptree):
324 data = ""
325 for dep in deptree:
326 deps = " " + "\n ".join(deptree[dep][3]) + "\n"
327 data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
328 return data
329
330 #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
331
332 #bb.note(" start2 is %s" % str(start))
333
334 # If start is an sstate task (like do_package) we need to add in its direct dependencies
335 # else the code below won't recurse into them.
336 for dep in set(start):
337 for dep2 in setscenedeps[dep][3]:
338 start.add(dep2)
339 start.remove(dep)
340
341 #bb.note(" start3 is %s" % str(start))
342
343 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
344 for dep in taskdepdata:
345 data = setscenedeps[dep]
346 if data[1] not in sstatetasks:
347 for dep2 in setscenedeps:
348 data2 = setscenedeps[dep2]
349 if dep in data2[3]:
350 data2[3].update(setscenedeps[dep][3])
351 data2[3].remove(dep)
352 if dep in start:
353 start.update(setscenedeps[dep][3])
354 start.remove(dep)
355 del setscenedeps[dep]
356
357 # Remove circular references
358 for dep in setscenedeps:
359 if dep in setscenedeps[dep][3]:
360 setscenedeps[dep][3].remove(dep)
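    # setscenedeps now only contains sstate-capable tasks, with their dependency
    # fields rewritten so edges run directly between sstate tasks (e.g.
    # do_populate_sysroot -> do_populate_sysroot).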
361
362 #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
363 #bb.note(" start is %s" % str(start))
364
365 # Direct dependencies should be present and can be depended upon
366 for dep in sorted(set(start)):
367 if setscenedeps[dep][1] == "do_populate_sysroot":
368 if dep not in configuredeps:
369 configuredeps.append(dep)
370 bb.note("Direct dependencies are %s" % str(configuredeps))
371 #bb.note(" or %s" % str(start))
372
373 msgbuf = []
374 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
375 # for ones that would be restored from sstate.
376 done = list(start)
377 next = list(start)
378 while next:
379 new = []
380 for dep in next:
381 data = setscenedeps[dep]
382 for datadep in data[3]:
383 if datadep in done:
384 continue
385 taskdeps = {}
386 taskdeps[dep] = setscenedeps[dep][:2]
387 taskdeps[datadep] = setscenedeps[datadep][:2]
388 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
389 if retval:
390 msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
391 continue
392 done.append(datadep)
393 new.append(datadep)
394 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
395 configuredeps.append(datadep)
396 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
397 else:
398 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
399 next = new
400
401 # This logging is sadly too verbose for day-to-day use
402 #bb.debug(2, "\n".join(msgbuf))
403
404 depdir = recipesysrootnative + "/installeddeps"
405 bb.utils.mkdirhier(depdir)
406 bb.utils.mkdirhier(sharedmanifests)
407
408 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
409
410 fixme = {}
411 seendirs = set()
412 postinsts = []
413 multilibs = {}
414 manifests = {}
415 # All files that we're going to be installing, to find conflicts.
416 fileset = {}
417
418 invalidate_tasks = set()
419 for f in os.listdir(depdir):
420 removed = []
421 if not f.endswith(".complete"):
422 continue
423 f = depdir + "/" + f
424 if os.path.islink(f) and not os.path.exists(f):
425 bb.note("%s no longer exists, removing from sysroot" % f)
426 lnk = os.readlink(f.replace(".complete", ""))
427 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
428 os.unlink(f)
429 os.unlink(f.replace(".complete", ""))
430 removed.append(os.path.basename(f.replace(".complete", "")))
431
432 # If we've removed files from the sysroot above, the task that installed them may still
433 # have a stamp file present. That stamp is probably invalid right now but could become
434 # valid again if the user changed the configuration back, for example. Since we've removed
435 # files a task might need, remove the stamp too so the task is forced to rerun.
436 # YOCTO #14790
437 if removed:
438 for i in glob.glob(depdir + "/index.*"):
439 if i.endswith("." + mytaskname):
440 continue
441 with open(i, "r") as f:
442 for l in f:
443 if l.startswith("TaskDeps:"):
444 continue
445 l = l.strip()
446 if l in removed:
447 invalidate_tasks.add(i.rsplit(".", 1)[1])
448 break
449 for t in invalidate_tasks:
450 bb.note("Invalidating stamps for task %s" % t)
451 bb.build.clean_stamp(t, d)
452
453 installed = []
454 for dep in configuredeps:
455 c = setscenedeps[dep][0]
456 if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
457 bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
458 continue
459 installed.append(c)
460
461 # We want to remove anything which this task previously installed but is no longer a dependency
462 taskindex = depdir + "/" + "index." + mytaskname
463 if os.path.exists(taskindex):
464 potential = []
465 with open(taskindex, "r") as f:
466 for l in f:
467 l = l.strip()
468 if l not in installed:
469 fl = depdir + "/" + l
470 if not os.path.exists(fl):
471 # Was likely already uninstalled
472 continue
473 potential.append(l)
474 # We need to ensure no other task needs this dependency. We hold the sysroot
475 # lock so we can search the indexes to check.
476 if potential:
477 for i in glob.glob(depdir + "/index.*"):
478 if i.endswith("." + mytaskname):
479 continue
480 with open(i, "r") as f:
481 for l in f:
482 if l.startswith("TaskDeps:"):
483 prevtasks = l.split()[1:]
484 if mytaskname in prevtasks:
485 # We're a dependency of this task, so we can clear items out of the sysroot
486 break
487 l = l.strip()
488 if l in potential:
489 potential.remove(l)
490 for l in potential:
491 fl = depdir + "/" + l
492 bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
493 lnk = os.readlink(fl)
494 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
495 os.unlink(fl)
496 os.unlink(fl + ".complete")
497
498 msg_exists = []
499 msg_adding = []
500
501 # Handle all removals first since files may move between recipes
502 for dep in configuredeps:
503 c = setscenedeps[dep][0]
504 if c not in installed:
505 continue
506 taskhash = setscenedeps[dep][5]
507 taskmanifest = depdir + "/" + c + "." + taskhash
508
509 if os.path.exists(depdir + "/" + c):
510 lnk = os.readlink(depdir + "/" + c)
511 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
512 continue
513 else:
514 bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
515 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
516 os.unlink(depdir + "/" + c)
517 if os.path.lexists(depdir + "/" + c + ".complete"):
518 os.unlink(depdir + "/" + c + ".complete")
519 elif os.path.lexists(depdir + "/" + c):
520 os.unlink(depdir + "/" + c)
521
522 binfiles = {}
523 # Now handle installs
524 for dep in sorted(configuredeps):
525 c = setscenedeps[dep][0]
526 if c not in installed:
527 continue
528 taskhash = setscenedeps[dep][5]
529 taskmanifest = depdir + "/" + c + "." + taskhash
530
531 if os.path.exists(depdir + "/" + c):
532 lnk = os.readlink(depdir + "/" + c)
533 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
534 msg_exists.append(c)
535 continue
536
537 msg_adding.append(c)
538
539 os.symlink(c + "." + taskhash, depdir + "/" + c)
540
541 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
542 if d2 is not d:
543 # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
544 # We need a consistent WORKDIR for the image
545 d2.setVar("WORKDIR", d.getVar("WORKDIR"))
546 destsysroot = d2.getVar("RECIPE_SYSROOT")
547 # We put allarch recipes into the default sysroot
548 if manifest and "allarch" in manifest:
549 destsysroot = d.getVar("RECIPE_SYSROOT")
550
551 native = False
552 if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
553 native = True
554
555 if manifest:
556 newmanifest = collections.OrderedDict()
557 targetdir = destsysroot
558 if native:
559 targetdir = recipesysrootnative
560 if targetdir not in fixme:
561 fixme[targetdir] = []
562 fm = fixme[targetdir]
563
564 with open(manifest, "r") as f:
565 manifests[dep] = manifest
566 for l in f:
567 l = l.strip()
568 if l.endswith("/fixmepath"):
569 fm.append(l)
570 continue
571 if l.endswith("/fixmepath.cmd"):
572 continue
573 dest = l.replace(stagingdir, "")
574 dest = "/" + "/".join(dest.split("/")[3:])
575 newmanifest[l] = targetdir + dest
576
577 # Check if files have already been installed by another
578 # recipe and abort if they have, explaining what recipes are
579 # conflicting.
580 hashname = targetdir + dest
581 if not hashname.endswith("/"):
582 if hashname in fileset:
583 bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
584 else:
585 fileset[hashname] = c
586
587 # Having multiple identical manifests in each sysroot eats disk space, so
588 # create a shared pool of them and hardlink if we can.
589 # We create the manifest in advance so that if something fails during installation,
590 # or the build is interrupted, a subsequent execution can clean up.
591 sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
592 if not os.path.exists(sharedm):
593 smlock = bb.utils.lockfile(sharedm + ".lock")
594 # Processes can race here. You'd think that would just mean not all copies end up hardlinked
595 # to each other, but Python can lose file handles, so we need to do this under a lock.
596 if not os.path.exists(sharedm):
597 with open(sharedm, 'w') as m:
598 for l in newmanifest:
599 dest = newmanifest[l]
600 m.write(dest.replace(workdir + "/", "") + "\n")
601 bb.utils.unlockfile(smlock)
602 try:
603 os.link(sharedm, taskmanifest)
604 except OSError as err:
605 if err.errno == errno.EXDEV:
606 bb.utils.copyfile(sharedm, taskmanifest)
607 else:
608 raise
609 # Finally actually install the files
610 for l in newmanifest:
611 dest = newmanifest[l]
612 if l.endswith("/"):
613 staging_copydir(l, targetdir, dest, seendirs)
614 continue
615 if "/bin/" in l or "/sbin/" in l:
616 # defer /*bin/* files until last in case they need libs
617 binfiles[l] = (targetdir, dest)
618 else:
619 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
620
621 # Handle deferred binfiles
622 for l in binfiles:
623 (targetdir, dest) = binfiles[l]
624 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
625
626 bb.note("Installed into sysroot: %s" % str(msg_adding))
627 bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
628
629 for f in fixme:
630 staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)
631
632 for p in sorted(postinsts):
633 bb.note("Running postinst {}, output:\n{}".format(p, subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)))
634
635 for dep in manifests:
636 c = setscenedeps[dep][0]
637 os.symlink(manifests[dep], depdir + "/" + c + ".complete")
638
639 with open(taskindex, "w") as f:
640 f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
641 for l in sorted(installed):
642 f.write(l + "\n")
643
644 bb.utils.unlockfile(lock)
645}
646extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
647
648do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
649python do_prepare_recipe_sysroot () {
650 bb.build.exec_func("extend_recipe_sysroot", d)
651}
652addtask do_prepare_recipe_sysroot before do_configure after do_fetch
653
654python staging_taskhandler() {
655 EXCLUDED_TASKS = (
656 "do_prepare_recipe_sysroot",
657 "do_create_spdx",
658 )
659 bbtasks = e.tasklist
660 for task in bbtasks:
661 if task in EXCLUDED_TASKS:
662 continue
663
664 deps = d.getVarFlag(task, "depends")
665 if task == "do_configure" or (deps and "populate_sysroot" in deps):
666 d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
667}
668staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
669addhandler staging_taskhandler
670
671
672#
673# Target build output, stored in do_populate_sysroot or do_package can depend
674# not only upon direct dependencies but also indirect ones. A good example is
675# linux-libc-headers. The toolchain depends on this but most target recipes do
676# not. Some headers are not used by the toolchain build and do not change the
677# toolchain task output, so the task hashes can change without changing that
678# recipe's sysroot output, yet those headers can still influence other recipes.
679#
680# A specific example is rtc.h, which can change rtcwake.c in util-linux but is not
681# used in the glibc or gcc build. To handle this, we need to include the
682# populate_sysroot hashes of dependencies in the task output hashes.
683#
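# The result is plain "recipe: taskhash" lines in HASHEQUIV_EXTRA_SIGDATA,
# e.g. (hypothetical hashes):
#   linux-libc-headers: 3fc5...
#   util-linux: 9b1d...
#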
684python target_add_sysroot_deps () {
685 current_task = "do_" + d.getVar("BB_CURRENTTASK")
686 if current_task not in ["do_populate_sysroot", "do_package"]:
687 return
688
689 pn = d.getVar("PN")
690 if pn.endswith("-native"):
691 return
692
693 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
694 deps = {}
695 for dep in taskdepdata.values():
696 if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0] and dep[0] != pn:
697 deps[dep[0]] = dep[6]
698
699 d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
700}
701SSTATECREATEFUNCS += "target_add_sysroot_deps"
702
diff --git a/meta/classes-global/uninative.bbclass b/meta/classes-global/uninative.bbclass
deleted file mode 100644
index c246a1ecd6..0000000000
--- a/meta/classes-global/uninative.bbclass
+++ /dev/null
@@ -1,183 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'riscv64', 'ld-linux-riscv64-lp64d.so.1', '', d)}"
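# For example, on an x86_64 build host UNINATIVE_LOADER resolves to
# ${UNINATIVE_STAGING_DIR}-uninative/x86_64-linux/lib/ld-linux-x86-64.so.2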
8UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
9
10UNINATIVE_URL ?= "unset"
11UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
12# Example checksums
13#UNINATIVE_CHECKSUM[aarch64] = "dead"
14#UNINATIVE_CHECKSUM[i686] = "dead"
15#UNINATIVE_CHECKSUM[x86_64] = "dead"
16UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
17
18# Enabling uninative will change the following variables, so they need to go on the parsing-ignored variables list to avoid triggering a full recipe reparse
19BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
20
21addhandler uninative_event_fetchloader
22uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
23
24addhandler uninative_event_enable
25uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
26
27python uninative_event_fetchloader() {
28 """
29 This event fires on the parent and will try to fetch the tarball if the
30 loader isn't already present.
31 """
32
33 chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
34 if not chksum:
35 bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
36
37 loader = d.getVar("UNINATIVE_LOADER")
38 loaderchksum = loader + ".chksum"
39 if os.path.exists(loader) and os.path.exists(loaderchksum):
40 with open(loaderchksum, "r") as f:
41 readchksum = f.read().strip()
42 if readchksum == chksum:
43 if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"):
44 enable_uninative(d)
45 return
46
47 import subprocess
48 try:
49 # Save and restore cwd as Fetch.download() does a chdir()
50 olddir = os.getcwd()
51
52 tarball = d.getVar("UNINATIVE_TARBALL")
53 tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
54 tarballpath = os.path.join(tarballdir, tarball)
55
56 if not os.path.exists(tarballpath + ".done"):
57 bb.utils.mkdirhier(tarballdir)
58 if d.getVar("UNINATIVE_URL") == "unset":
59 bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
60
61 localdata = bb.data.createCopy(d)
62 localdata.setVar('FILESPATH', "")
63 localdata.setVar('DL_DIR', tarballdir)
64 # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work,
65 # and we can't easily put 'chksum' into the URL path from a URL parameter with
66 # the current fetcher URL handling.
67 premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
68 for line in premirrors:
69 try:
70 (find, replace) = line
71 except ValueError:
72 continue
73 if find.startswith("http"):
74 localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
75
76 srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
77 bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
78
79 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
80 fetcher.download()
81 localpath = fetcher.localpath(srcuri)
82 if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
83 # Follow the symlink behavior of bitbake's fetch2.
84 # This will cover the case where an existing symlink is broken
85 # as well as if there are two processes trying to create it
86 # at the same time.
87 if os.path.islink(tarballpath):
88 # Broken symbolic link
89 os.unlink(tarballpath)
90
91 # Deal with two processes trying to make symlink at once
92 try:
93 os.symlink(localpath, tarballpath)
94 except FileExistsError:
95 pass
96
97 # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23"; extract the last field of the first line
98 glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
99 if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
100 raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
101
102 cmd = d.expand("\
103mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
104cd ${UNINATIVE_STAGING_DIR}-uninative; \
105tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
106${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
107 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
108 ${UNINATIVE_LOADER} \
109 ${UNINATIVE_LOADER} \
110 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
111 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
112 subprocess.check_output(cmd, shell=True, text=True, stderr=subprocess.STDOUT)
113
114 with open(loaderchksum, "w") as f:
115 f.write(chksum)
116
117 enable_uninative(d)
118
119 except RuntimeError as e:
120 bb.warn(str(e))
121 except bb.fetch2.BBFetchException as exc:
122 bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
123 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
124 except subprocess.CalledProcessError as exc:
125 bb.warn("Disabling uninative as unable to install uninative tarball:")
126 bb.warn(str(exc))
127 bb.warn(exc.stdout)
128 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
129 finally:
130 os.chdir(olddir)
131}
132
133python uninative_event_enable() {
134 """
135 This event handler is called in the workers and is responsible for setting
136 up uninative if a loader is found.
137 """
138 enable_uninative(d)
139}
140
141def enable_uninative(d):
142 loader = d.getVar("UNINATIVE_LOADER")
143 if os.path.exists(loader):
144 bb.debug(2, "Enabling uninative")
145 d.setVar("NATIVELSBSTRING", "universal")
146 d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
147 d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
148 d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER} -pthread")
149 d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER} -pthread")
150 d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
151 d.prependVar("PATH", "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
152
153python uninative_changeinterp () {
154 import subprocess
155 import stat
156 import oe.qa
157
158 if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
159 return
160
161 sstateinst = d.getVar('SSTATE_INSTDIR')
162 for walkroot, dirs, files in os.walk(sstateinst):
163 for file in files:
164 if file.endswith(".so") or ".so." in file:
165 continue
166 f = os.path.join(walkroot, file)
167 if os.path.islink(f):
168 continue
169 s = os.stat(f)
170 if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
171 continue
172 elf = oe.qa.ELFFile(f)
173 try:
174 elf.open()
175 except oe.qa.NotELFFileError:
176 continue
177 if not elf.isDynamic():
178 continue
179
180 os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
181 subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
182 os.chmod(f, s[stat.ST_MODE])
183}
diff --git a/meta/classes-global/utility-tasks.bbclass b/meta/classes-global/utility-tasks.bbclass
deleted file mode 100644
index 394cc3158d..0000000000
--- a/meta/classes-global/utility-tasks.bbclass
+++ /dev/null
@@ -1,59 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7addtask listtasks
8do_listtasks[nostamp] = "1"
9python do_listtasks() {
10 taskdescs = {}
11 maxlen = 0
12 for t in bb.build.listtasks(d):
13 maxlen = max(maxlen, len(t))
14
15 if t.endswith('_setscene'):
16 desc = "%s (setscene version)" % (d.getVarFlag(t[:-9], 'doc') or '')
17 else:
18 desc = d.getVarFlag(t, 'doc') or ''
19 taskdescs[t] = desc
20
21 for task, doc in sorted(taskdescs.items()):
22 bb.plain("%s %s" % (task.ljust(maxlen), doc))
23}
24
25CLEANFUNCS ?= ""
26
27T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
28addtask clean
29do_clean[nostamp] = "1"
30python do_clean() {
31 """clear the build and temp directories"""
32 dir = d.expand("${WORKDIR}")
33 bb.note("Removing " + dir)
34 oe.path.remove(dir)
35
36 dir = "%s.*" % d.getVar('STAMP')
37 bb.note("Removing " + dir)
38 oe.path.remove(dir)
39
40 for f in (d.getVar('CLEANFUNCS') or '').split():
41 bb.build.exec_func(f, d)
42}
43
44addtask checkuri
45do_checkuri[nostamp] = "1"
46do_checkuri[network] = "1"
47python do_checkuri() {
48 src_uri = (d.getVar('SRC_URI') or "").split()
49 if len(src_uri) == 0:
50 return
51
52 try:
53 fetcher = bb.fetch2.Fetch(src_uri, d)
54 fetcher.checkstatus()
55 except bb.fetch2.BBFetchException as e:
56 bb.fatal(str(e))
57}
58
59
diff --git a/meta/classes-global/utils.bbclass b/meta/classes-global/utils.bbclass
deleted file mode 100644
index 530a490ea8..0000000000
--- a/meta/classes-global/utils.bbclass
+++ /dev/null
@@ -1,379 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7oe_soinstall() {
8 # Purpose: Install a shared library file and
9 # create the necessary links
10 # Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
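# This installs libfoo.so.1.2.3 and creates both the SONAME link
# (e.g. libfoo.so.1, read from the ELF header) and the libfoo.so dev link.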
11 libname=`basename $1`
12 case "$libname" in
13 *.so)
14 bbfatal "oe_soinstall: Shared library must have a versioned filename (e.g. libfoo.so.1.2.3)"
15 ;;
16 esac
17 install -m 755 $1 $2/$libname
18 sonamelink=`${OBJDUMP} -p $1 | grep SONAME | awk '{print $2}'`
19 if [ -z "$sonamelink" ]; then
20 bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
21 fi
22 solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
23 ln -sf $libname $2/$sonamelink
24 ln -sf $libname $2/$solink
25}
26
27oe_libinstall() {
28 # Purpose: Install a library, in all its forms
29 # Example
30 #
31 # oe_libinstall libltdl ${STAGING_LIBDIR}/
32 # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
33 dir=""
34 libtool=""
35 silent=""
36 require_static=""
37 require_shared=""
38 while [ "$#" -gt 0 ]; do
39 case "$1" in
40 -C)
41 shift
42 dir="$1"
43 ;;
44 -s)
45 silent=1
46 ;;
47 -a)
48 require_static=1
49 ;;
50 -so)
51 require_shared=1
52 ;;
53 -*)
54 bbfatal "oe_libinstall: unknown option: $1"
55 ;;
56 *)
57 break;
58 ;;
59 esac
60 shift
61 done
62
63 libname="$1"
64 shift
65 destpath="$1"
66 if [ -z "$destpath" ]; then
67 bbfatal "oe_libinstall: no destination path specified"
68 fi
69
70 __runcmd () {
71 if [ -z "$silent" ]; then
72 echo >&2 "oe_libinstall: $*"
73 fi
74 $*
75 }
76
77 if [ -z "$dir" ]; then
78 dir=`pwd`
79 fi
80
81 dotlai=$libname.lai
82
83 # Sanity check that the libname.lai is unique
84 number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
85 if [ $number_of_files -gt 1 ]; then
86 bbfatal "oe_libinstall: $dotlai is not unique in $dir"
87 fi
88
89
90 dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
91 olddir=`pwd`
92 __runcmd cd $dir
93
94 lafile=$libname.la
95
96 # If no such file exists, try cutting the version suffix
97 if [ ! -f "$lafile" ]; then
98 libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
99 lafile1=$libname1.la
100 if [ -f "$lafile1" ]; then
101 libname=$libname1
102 lafile=$lafile1
103 fi
104 fi
105
106 if [ -f "$lafile" ]; then
107 # libtool archive
108 eval `cat $lafile|grep "^library_names="`
109 libtool=1
110 else
111 library_names="$libname.so* $libname.dll.a $libname.*.dylib"
112 fi
113
114 __runcmd install -d $destpath/
115 dota=$libname.a
116 if [ -f "$dota" -o -n "$require_static" ]; then
117 rm -f $destpath/$dota
118 __runcmd install -m 0644 $dota $destpath/
119 fi
120 if [ -f "$dotlai" -a -n "$libtool" ]; then
121 rm -f $destpath/$libname.la
122 __runcmd install -m 0644 $dotlai $destpath/$libname.la
123 fi
124
125 for name in $library_names; do
126 files=`eval echo $name`
127 for f in $files; do
128 if [ ! -e "$f" ]; then
129 if [ -n "$libtool" ]; then
130 bbfatal "oe_libinstall: $dir/$f not found."
131 fi
132 elif [ -L "$f" ]; then
133 __runcmd cp -P "$f" $destpath/
134 elif [ ! -L "$f" ]; then
135 libfile="$f"
136 rm -f $destpath/$libfile
137 __runcmd install -m 0755 $libfile $destpath/
138 fi
139 done
140 done
141
142 if [ -z "$libfile" ]; then
143 if [ -n "$require_shared" ]; then
144 bbfatal "oe_libinstall: unable to locate shared library"
145 fi
146 elif [ -z "$libtool" ]; then
147 # special case hack for non-libtool .so.#.#.# links
148 baselibfile=`basename "$libfile"`
149 if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
150 sonamelink=`${OBJDUMP} -p $libfile | grep SONAME | awk '{print $2}'`
151 solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
152 if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
153 __runcmd ln -sf $baselibfile $destpath/$sonamelink
154 fi
155 __runcmd ln -sf $baselibfile $destpath/$solink
156 fi
157 fi
158
159 __runcmd cd "$olddir"
160}
161
162create_cmdline_wrapper () {
163 # Create a wrapper script where commandline options are needed
164 #
165 # These are useful to work around relocation issues, by passing extra options
166 # to a program
167 #
168 # Usage: create_cmdline_wrapper FILENAME <extra-options>
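# e.g. create_cmdline_wrapper ${D}${bindir}/foo --config=${sysconfdir}/foo.conf
# (hypothetical invocation; any ${base_prefix} paths in the options are
# rewritten relative to the wrapper's own location)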
169
170 cmd=$1
171 shift
172
173 echo "Generating wrapper script for $cmd"
174
175 mv $cmd $cmd.real
176 cmdname=`basename $cmd`
177 dirname=`dirname $cmd`
178 cmdoptions=$@
179 if [ "${base_prefix}" != "" ]; then
180 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
181 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
182 fi
183 cat <<END >$cmd
184#!/bin/bash
185realpath=\`readlink -fn \$0\`
186realdir=\`dirname \$realpath\`
187exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
188END
189 chmod +x $cmd
190}
191
192create_cmdline_shebang_wrapper () {
193 # Create a wrapper script where commandline options are needed
194 #
195 # These are useful to work around shebang relocation issues, where shebangs are too
196 # long or have arguments in them, thus preventing them from using the /usr/bin/env
197 # shebang
198 #
199 # Usage: create_cmdline_shebang_wrapper FILENAME <extra-options>
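# e.g. a script whose first line is "#!/usr/bin/python3 -sE" (hypothetical)
# becomes <name>.real without the shebang, plus a wrapper that execs
# "python3 -sE <name>.real" with any extra options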
200
201 cmd=$1
202 shift
203
204 echo "Generating wrapper script for $cmd"
205
206 # Strip #! and get remaining interpreter + arg
207 argument="$(sed -ne 's/^#! *//p;q' $cmd)"
208 # strip the shebang from the real script as we do not want it to be usable anyway
209 tail -n +2 $cmd > $cmd.real
210 chown --reference=$cmd $cmd.real
211 chmod --reference=$cmd $cmd.real
212 rm -f $cmd
213 cmdname=$(basename $cmd)
214 dirname=$(dirname $cmd)
215 cmdoptions=$@
216 if [ "${base_prefix}" != "" ]; then
217 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
218 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
219 fi
220 cat <<END >$cmd
221#!/usr/bin/env bash
222realpath=\`readlink -fn \$0\`
223realdir=\`dirname \$realpath\`
224exec -a \$realdir/$cmdname $argument \$realdir/$cmdname.real $cmdoptions "\$@"
225END
226 chmod +x $cmd
227}
228
229create_wrapper () {
230 # Create a wrapper script where extra environment variables are needed
231 #
232 # These are useful to work around relocation issues, by setting environment
233 # variables which point to paths in the filesystem.
234 #
235 # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
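# e.g. create_wrapper ${D}${bindir}/foo FOO_DATA=${datadir}/foo
# (hypothetical invocation; FOO_DATA is exported before the real binary runs)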
236
237 cmd=$1
238 shift
239
240 echo "Generating wrapper script for $cmd"
241
242 mv $cmd $cmd.real
243 cmdname=`basename $cmd`
244 dirname=`dirname $cmd`
245 exportstring=$@
246 if [ "${base_prefix}" != "" ]; then
247 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
248 exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
249 fi
250 cat <<END >$cmd
251#!/bin/bash
252realpath=\`readlink -fn \$0\`
253realdir=\`dirname \$realpath\`
254export $exportstring
255exec -a "\$0" \$realdir/$cmdname.real "\$@"
256END
257 chmod +x $cmd
258}
259
260# Copy files/directories from $1 to $2 but using hardlinks
261# (preserve symlinks)
262hardlinkdir () {
263 from=$1
264 to=$2
265 (cd $from; find . -print0 | cpio --null -pdlu $to)
266}
267
268
269def check_app_exists(app, d):
270 app = d.expand(app).split()[0].strip()
271 path = d.getVar('PATH')
272 return bool(bb.utils.which(path, app))
273
274def explode_deps(s):
275 return bb.utils.explode_deps(s)
276
277def base_set_filespath(path, d):
278 filespath = []
279 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
280 # Remove default flag which was used for checking
281 extrapaths = extrapaths.replace("__default:", "")
282 # Don't prepend empty strings to the path list
283 if extrapaths != "":
284 path = extrapaths.split(":") + path
285 # The ":" ensures we have an 'empty' override
286 overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
287 overrides.reverse()
288 for o in overrides:
289 for p in path:
290 if p != "":
291 filespath.append(os.path.join(p, o))
292 return ":".join(filespath)
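# For illustration: with path ["files"] and FILESOVERRIDES "poky:qemux86",
# this returns "files/qemux86:files/poky:files/", i.e. the most specific
# override first (hypothetical values).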
293
294def extend_variants(d, var, extend, delim=':'):
295 """Return a string of all bb class extend variants for the given extend"""
296 variants = []
297 whole = d.getVar(var) or ""
298 for ext in whole.split():
299 eext = ext.split(delim)
300 if len(eext) > 1 and eext[0] == extend:
301 variants.append(eext[1])
302 return " ".join(variants)
303
304def multilib_pkg_extend(d, pkg):
305 variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
306 if not variants:
307 return pkg
308 pkgs = pkg
309 for v in variants:
310 pkgs = pkgs + " " + v + "-" + pkg
311 return pkgs
312
313def get_multilib_datastore(variant, d):
314 return oe.utils.get_multilib_datastore(variant, d)
315
316def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
317 """Return a string of all ${var} in all multilib tune configuration"""
318 values = []
319 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
320 for item in variants:
321 localdata = get_multilib_datastore(item, d)
322 # We need WORKDIR to be consistent with the original datastore
323 localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
324 value = localdata.getVar(var) or ""
325 if value != "":
326 if need_split:
327 for item in value.split(delim):
328 values.append(item)
329 else:
330 values.append(value)
331 if unique:
332 # We do this to keep order as much as possible
333 ret = []
334 for value in values:
335 if not value in ret:
336 ret.append(value)
337 else:
338 ret = values
339 return " ".join(ret)
340
341def all_multilib_tune_list(vars, d):
342 """
343 Return a list of ${VAR} for each variable VAR in vars from each
344 multilib tune configuration.
345 It is safe to call from a multilib recipe/context as it can
346 figure out the original tune and remove the multilib overrides.
347 """
348 values = {}
349 for v in vars:
350 values[v] = []
351 values['ml'] = ['']
352
353 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
354 for item in variants:
355 localdata = get_multilib_datastore(item, d)
356 for v in vars: values[v].append(localdata.getVar(v))
357 values['ml'].append(item)
358 return values
359all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
360
361# If the user hasn't set up their name/email, set some defaults
362check_git_config() {
363 if ! git config user.email > /dev/null ; then
364 git config --local user.email "${PATCH_GIT_USER_EMAIL}"
365 fi
366 if ! git config user.name > /dev/null ; then
367 git config --local user.name "${PATCH_GIT_USER_NAME}"
368 fi
369}
370
371# Sets fixed git committer and author for reproducible commits
372reproducible_git_committer_author() {
373 export GIT_COMMITTER_NAME="${PATCH_GIT_USER_NAME}"
374 export GIT_COMMITTER_EMAIL="${PATCH_GIT_USER_EMAIL}"
375 export GIT_COMMITTER_DATE="$(date -d @${SOURCE_DATE_EPOCH})"
376 export GIT_AUTHOR_NAME="${PATCH_GIT_USER_NAME}"
377 export GIT_AUTHOR_EMAIL="${PATCH_GIT_USER_EMAIL}"
378 export GIT_AUTHOR_DATE="$(date -d @${SOURCE_DATE_EPOCH})"
379}
diff --git a/meta/classes-global/yocto-check-layer.bbclass b/meta/classes-global/yocto-check-layer.bbclass
deleted file mode 100644
index ba93085325..0000000000
--- a/meta/classes-global/yocto-check-layer.bbclass
+++ /dev/null
@@ -1,62 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# This class is used by the yocto-check-layer script for additional
8# per-recipe tests.
9#
10# It adds an anonymous python function with extra processing to all recipes.
11# Globally inheriting this class isn't advisable; the yocto-check-layer script
12# handles that during its signature dump.
13
14
15# Ensure that recipes don't skip required QA checks as listed
16# in CHECKLAYER_REQUIRED_TESTS, defined by insane.bbclass
17def check_insane_skip(d):
18 required_tests = set((d.getVar('CHECKLAYER_REQUIRED_TESTS') or '').split())
19 packages = set((d.getVar('PACKAGES') or '').split())
20 for package in packages:
21 skip = set((d.getVar('INSANE_SKIP') or "").split() +
22 (d.getVar('INSANE_SKIP:' + package) or "").split())
23 skip_required = skip & required_tests
24 if skip_required:
25 oe.qa.write_error(" ".join(skip_required), 'Package %s is skipping required QA tests.' % package, d)
26 bb.error("QA Issue: %s [%s]" % ('Package %s is skipping required QA tests.' % package, " ".join(skip_required)))
27 d.setVar("QA_ERRORS_FOUND", "True")
28
29
30# Check that no tasks (with rare exceptions) between do_fetch and do_build
31# use the network.
32def check_network_flag(d):
33 # BPN:task names that are allowed to reach the network, using fnmatch to compare.
34 allowed = []
35 # build-appliance-image uses pip at image time
36 allowed += ["build-appliance-image:do_image"]
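    # Further patterns can be added the same way, e.g. (hypothetical):
    # allowed += ["my-recipe:do_run_tests*"]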
37
38 def is_allowed(bpn, task):
39 from fnmatch import fnmatch
40 name = f"{bpn}:{task}"
41 return any(fnmatch(name, pattern) for pattern in allowed)
42
43 bpn = d.getVar("BPN")
44 seen = set()
45 stack = {"do_build"}
46 while stack:
47 task = stack.pop()
48 if task == "do_fetch":
49 continue
50
51 seen.add(task)
52 deps = d.getVarFlag(task, "deps") or []
53 stack |= {d for d in deps if d not in seen}
54
55 network = bb.utils.to_boolean(d.getVarFlag(task, "network"))
56 if network and not is_allowed(bpn, task):
57 bb.error(f"QA Issue: task {task} has network enabled")
58
59python () {
60 check_insane_skip(d)
61 check_network_flag(d)
62}