Diffstat (limited to 'meta/classes-global')
-rw-r--r--  meta/classes-global/base.bbclass             |  789
-rw-r--r--  meta/classes-global/buildstats.bbclass       |  302
-rw-r--r--  meta/classes-global/debian.bbclass           |  156
-rw-r--r--  meta/classes-global/devshell.bbclass         |  166
-rw-r--r--  meta/classes-global/insane.bbclass           | 1453
-rw-r--r--  meta/classes-global/license.bbclass          |  426
-rw-r--r--  meta/classes-global/logging.bbclass          |  107
-rw-r--r--  meta/classes-global/mirrors.bbclass          |   95
-rw-r--r--  meta/classes-global/package.bbclass          | 2558
-rw-r--r--  meta/classes-global/package_deb.bbclass      |  329
-rw-r--r--  meta/classes-global/package_ipk.bbclass      |  292
-rw-r--r--  meta/classes-global/package_pkgdata.bbclass  |  173
-rw-r--r--  meta/classes-global/package_rpm.bbclass      |  761
-rw-r--r--  meta/classes-global/package_tar.bbclass      |   77
-rw-r--r--  meta/classes-global/packagedata.bbclass      |   40
-rw-r--r--  meta/classes-global/patch.bbclass            |  171
-rw-r--r--  meta/classes-global/sanity.bbclass           | 1028
-rw-r--r--  meta/classes-global/sstate.bbclass           | 1364
-rw-r--r--  meta/classes-global/staging.bbclass          |  690
-rw-r--r--  meta/classes-global/uninative.bbclass        |  177
-rw-r--r--  meta/classes-global/utility-tasks.bbclass    |   60
-rw-r--r--  meta/classes-global/utils.bbclass            |  369
22 files changed, 11583 insertions, 0 deletions
diff --git a/meta/classes-global/base.bbclass b/meta/classes-global/base.bbclass
new file mode 100644
index 0000000000..8203f54519
--- /dev/null
+++ b/meta/classes-global/base.bbclass
@@ -0,0 +1,789 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7BB_DEFAULT_TASK ?= "build"
8CLASSOVERRIDE ?= "class-target"
9
10inherit patch
11inherit staging
12
13inherit mirrors
14inherit utils
15inherit utility-tasks
16inherit logging
17
18OE_EXTRA_IMPORTS ?= ""
19
20OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust oe.buildcfg ${OE_EXTRA_IMPORTS}"
21OE_IMPORTS[type] = "list"
22
23PACKAGECONFIG_CONFARGS ??= ""
24
25def oe_import(d):
26 import sys
27
28 bbpath = [os.path.join(dir, "lib") for dir in d.getVar("BBPATH").split(":")]
29 sys.path[0:0] = [dir for dir in bbpath if dir not in sys.path]
30
31 import oe.data
32 for toimport in oe.data.typed_value("OE_IMPORTS", d):
33 try:
34 # Make a python object accessible from the metadata
35 bb.utils._context[toimport.split(".", 1)[0]] = __import__(toimport)
36 except AttributeError as e:
37 bb.error("Error importing OE modules: %s" % str(e))
38 return ""
39
40# We need the oe module namespace early (before INHERITs get added)
41OE_IMPORTED := "${@oe_import(d)}"
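# With the namespace in place, inline Python anywhere in the metadata can call
# into these modules; an illustrative (hypothetical) use in a recipe:
#   FOO = "${@oe.utils.conditional('BAR', '1', 'yes', 'no', d)}"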
42
43inherit metadata_scm
44
45def lsb_distro_identifier(d):
46 adjust = d.getVar('LSB_DISTRO_ADJUST')
47 adjust_func = None
48 if adjust:
49 try:
50 adjust_func = globals()[adjust]
51 except KeyError:
52 pass
53 return oe.lsb.distro_identifier(adjust_func)
54
55die() {
56 bbfatal_log "$*"
57}
58
59oe_runmake_call() {
60 bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
61 ${MAKE} ${EXTRA_OEMAKE} "$@"
62}
63
64oe_runmake() {
65 oe_runmake_call "$@" || die "oe_runmake failed"
66}
67
68
69def get_base_dep(d):
70 if d.getVar('INHIBIT_DEFAULT_DEPS', False):
71 return ""
72 return "${BASE_DEFAULT_DEPS}"
73
74BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"
75
76BASEDEPENDS = ""
77BASEDEPENDS:class-target = "${@get_base_dep(d)}"
78BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"
79
80DEPENDS:prepend="${BASEDEPENDS} "
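# Recipes that must build before the default toolchain exists (the toolchain
# bootstrap itself) opt out of these dependencies by setting
# INHIBIT_DEFAULT_DEPS = "1" (see get_base_dep above).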
81
82FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
83# THISDIR only works properly with immediate expansion as it has to run
84# in the context of the location it's used (:=)
85THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
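# A bbappend typically relies on this, e.g. the common idiom for adding a
# patch directory next to the append file:
#   FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"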
86
87def extra_path_elements(d):
88 path = ""
89 elements = (d.getVar('EXTRANATIVEPATH') or "").split()
90 for e in elements:
91 path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
92 return path
93
94PATH:prepend = "${@extra_path_elements(d)}"
95
96def get_lic_checksum_file_list(d):
97 filelist = []
98 lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
99 tmpdir = d.getVar("TMPDIR")
100 s = d.getVar("S")
101 b = d.getVar("B")
102 workdir = d.getVar("WORKDIR")
103
104 urls = lic_files.split()
105 for url in urls:
106 # We only care about items that are absolute paths since
107 # any others should be covered by SRC_URI.
108 try:
109 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
110 if method != "file" or not path:
111 raise bb.fetch.MalformedUrl(url)
112
113 if path[0] == '/':
114 if path.startswith((tmpdir, s, b, workdir)):
115 continue
116 filelist.append(path + ":" + str(os.path.exists(path)))
117 except bb.fetch.MalformedUrl:
118 bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
119 return " ".join(filelist)
120
121def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
122 tools = d.getVar(toolsvar).split()
123 origbbenv = d.getVar("BB_ORIGENV", False)
124 path = origbbenv.getVar("PATH")
125 # Need to ignore our own scripts directories to avoid circular links
126 for p in path.split(":"):
127 if p.endswith("/scripts"):
128 path = path.replace(p, "/ignoreme")
129 bb.utils.mkdirhier(dest)
130 notfound = []
131 for tool in tools:
132 desttool = os.path.join(dest, tool)
133 if not os.path.exists(desttool):
134 # clean up dead symlink
135 if os.path.islink(desttool):
136 os.unlink(desttool)
137 srctool = bb.utils.which(path, tool, executable=True)
138 # gcc/g++ may link to ccache on some hosts, e.g.,
139 # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
140 # would return /usr/local/bin/ccache/gcc, but what we need is
141            # /usr/bin/gcc; this code detects and fixes that.
142 if "ccache" in srctool:
143 srctool = bb.utils.which(path, tool, executable=True, direction=1)
144 if srctool:
145 os.symlink(srctool, desttool)
146 else:
147 notfound.append(tool)
148
149 if notfound and fatal:
150 bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
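# The tool list itself comes from HOSTTOOLS (required) and HOSTTOOLS_NONFATAL
# (optional); users can extend it, e.g. in local.conf (illustrative):
#   HOSTTOOLS_NONFATAL += "rsync"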
151
152addtask fetch
153do_fetch[dirs] = "${DL_DIR}"
154do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
155do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
156do_fetch[vardeps] += "SRCREV"
157do_fetch[network] = "1"
158python base_do_fetch() {
159
160 src_uri = (d.getVar('SRC_URI') or "").split()
161 if not src_uri:
162 return
163
164 try:
165 fetcher = bb.fetch2.Fetch(src_uri, d)
166 fetcher.download()
167 except bb.fetch2.BBFetchException as e:
168 bb.fatal("Bitbake Fetcher Error: " + repr(e))
169}
170
171addtask unpack after do_fetch
172do_unpack[dirs] = "${WORKDIR}"
173
174do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
175
176python base_do_unpack() {
177 src_uri = (d.getVar('SRC_URI') or "").split()
178 if not src_uri:
179 return
180
181 try:
182 fetcher = bb.fetch2.Fetch(src_uri, d)
183 fetcher.unpack(d.getVar('WORKDIR'))
184 except bb.fetch2.BBFetchException as e:
185 bb.fatal("Bitbake Fetcher Error: " + repr(e))
186}
187
188SSTATETASKS += "do_deploy_source_date_epoch"
189
190do_deploy_source_date_epoch () {
191 mkdir -p ${SDE_DEPLOYDIR}
192 if [ -e ${SDE_FILE} ]; then
193 echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
194 cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
195 else
196 echo "${SDE_FILE} not found!"
197 fi
198}
199
200python do_deploy_source_date_epoch_setscene () {
201 sstate_setscene(d)
202 bb.utils.mkdirhier(d.getVar('SDE_DIR'))
203 sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
204 if os.path.exists(sde_file):
205 target = d.getVar('SDE_FILE')
206 bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
207 bb.utils.rename(sde_file, target)
208 else:
209 bb.debug(1, "%s not found!" % sde_file)
210}
211
212do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
213do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
214addtask do_deploy_source_date_epoch_setscene
215addtask do_deploy_source_date_epoch before do_configure after do_patch
216
217python create_source_date_epoch_stamp() {
218 # Version: 1
219 source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
220 oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
221}
222do_unpack[postfuncs] += "create_source_date_epoch_stamp"
223
224def get_source_date_epoch_value(d):
225 return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
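# Elsewhere in the metadata this is typically consumed as (illustrative):
#   export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"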
226
227def get_layers_branch_rev(d):
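    # Returns one '<layer> = "<branch>:<rev>"' line per layer; the loop below
    # drops the suffix from consecutive entries that share it, keeping it only
    # on the last one, e.g. (illustrative):
    #   meta
    #   meta-poky            = "master:abc123"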
228 revisions = oe.buildcfg.get_layer_revisions(d)
229 layers_branch_rev = ["%-20s = \"%s:%s\"" % (r[1], r[2], r[3]) for r in revisions]
230 i = len(layers_branch_rev)-1
231 p1 = layers_branch_rev[i].find("=")
232 s1 = layers_branch_rev[i][p1:]
233 while i > 0:
234 p2 = layers_branch_rev[i-1].find("=")
235        s2 = layers_branch_rev[i-1][p2:]
236 if s1 == s2:
237 layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
238 i -= 1
239 else:
240 i -= 1
241 p1 = layers_branch_rev[i].find("=")
242            s1 = layers_branch_rev[i][p1:]
243 return layers_branch_rev
244
245
246BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
247BUILDCFG_FUNCS[type] = "list"
248
249def buildcfg_vars(d):
250 statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
251 for var in statusvars:
252 value = d.getVar(var)
253 if value is not None:
254 yield '%-20s = "%s"' % (var, value)
255
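# BUILDCFG_VARS is normally populated from the distro/site configuration, e.g.
# (illustrative): BUILDCFG_VARS += "MACHINE DISTRO TUNE_FEATURES"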
256def buildcfg_neededvars(d):
257 needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
258 pesteruser = []
259 for v in needed_vars:
260 val = d.getVar(v)
261 if not val or val == 'INVALID':
262 pesteruser.append(v)
263
264 if pesteruser:
265 bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
266
267addhandler base_eventhandler
268base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
269python base_eventhandler() {
270 import bb.runqueue
271
272 if isinstance(e, bb.event.ConfigParsed):
273 if not d.getVar("NATIVELSBSTRING", False):
274 d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
275 d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
276 d.setVar('BB_VERSION', bb.__version__)
277
278    # There might be no bb.event.ConfigParsed event if the bitbake server is
279 # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
280 # exists.
281 if isinstance(e, bb.event.ConfigParsed) or \
282 (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
283 # Works with the line in layer.conf which changes PATH to point here
284 setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
285 setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
286
287 if isinstance(e, bb.event.MultiConfigParsed):
288 # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
289 # own contexts so the variables get expanded correctly for that arch, then inject back into
290 # the main data store.
291 deps = []
292 for config in e.mcdata:
293 deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
294 deps = " ".join(deps)
295 e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
296
297 if isinstance(e, bb.event.BuildStarted):
298 localdata = bb.data.createCopy(d)
299 statuslines = []
300 for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
301 g = globals()
302 if func not in g:
303 bb.warn("Build configuration function '%s' does not exist" % func)
304 else:
305 flines = g[func](localdata)
306 if flines:
307 statuslines.extend(flines)
308
309 statusheader = d.getVar('BUILDCFG_HEADER')
310 if statusheader:
311 bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
312
313 # This code is to silence warnings where the SDK variables overwrite the
314    # target ones and we'd see duplicate key names overwriting each other
315 # for various PREFERRED_PROVIDERS
316 if isinstance(e, bb.event.RecipePreFinalise):
317 if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
318 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
319 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
320 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
321 d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
322
323 if isinstance(e, bb.event.RecipeParsed):
324 #
325 # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
326 # skip parsing for all the other providers which will mean they get uninstalled from the
327 # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
328 # particular.
329 #
330 pn = d.getVar('PN')
331 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
332 if not source_mirror_fetch:
333 provs = (d.getVar("PROVIDES") or "").split()
334 multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
335 for p in provs:
336 if p.startswith("virtual/") and p not in multiprovidersallowed:
337 profprov = d.getVar("PREFERRED_PROVIDER_" + p)
338 if profprov and pn != profprov:
339 raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
340}
341
342CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
343CLEANBROKEN = "0"
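# Recipes whose "make clean" is known not to work can set CLEANBROKEN = "1"
# so the re-configure path below skips the clean step.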
344
345addtask configure after do_patch
346do_configure[dirs] = "${B}"
347base_do_configure() {
348 if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
349 if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
350 cd ${B}
351 if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
352 oe_runmake clean
353 fi
354 # -ignore_readdir_race does not work correctly with -delete;
355 # use xargs to avoid spurious build failures
356 find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
357 fi
358 fi
359 if [ -n "${CONFIGURESTAMPFILE}" ]; then
360 mkdir -p `dirname ${CONFIGURESTAMPFILE}`
361 echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
362 fi
363}
364
365addtask compile after do_configure
366do_compile[dirs] = "${B}"
367base_do_compile() {
368 if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
369 oe_runmake || die "make failed"
370 else
371 bbnote "nothing to compile"
372 fi
373}
374
375addtask install after do_compile
376do_install[dirs] = "${B}"
377# Remove and re-create ${D} so that it is guaranteed to be empty
378do_install[cleandirs] = "${D}"
379
380base_do_install() {
381 :
382}
383
384base_do_package() {
385 :
386}
387
388addtask build after do_populate_sysroot
389do_build[noexec] = "1"
390do_build[recrdeptask] += "do_deploy"
391do_build () {
392 :
393}
394
395def set_packagetriplet(d):
396 archs = []
397 tos = []
398 tvs = []
399
400 archs.append(d.getVar("PACKAGE_ARCHS").split())
401 tos.append(d.getVar("TARGET_OS"))
402 tvs.append(d.getVar("TARGET_VENDOR"))
403
404 def settriplet(d, varname, archs, tos, tvs):
405 triplets = []
406 for i in range(len(archs)):
407 for arch in archs[i]:
408 triplets.append(arch + tvs[i] + "-" + tos[i])
409 triplets.reverse()
410 d.setVar(varname, " ".join(triplets))
411
412 settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
413
414 variants = d.getVar("MULTILIB_VARIANTS") or ""
415 for item in variants.split():
416 localdata = bb.data.createCopy(d)
417 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
418 localdata.setVar("OVERRIDES", overrides)
419
420 archs.append(localdata.getVar("PACKAGE_ARCHS").split())
421 tos.append(localdata.getVar("TARGET_OS"))
422 tvs.append(localdata.getVar("TARGET_VENDOR"))
423
424 settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
425
426python () {
427 import string, re
428
429 # Handle backfilling
430 oe.utils.features_backfill("DISTRO_FEATURES", d)
431 oe.utils.features_backfill("MACHINE_FEATURES", d)
432
433 if d.getVar("S")[-1] == '/':
434 bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
435 if d.getVar("B")[-1] == '/':
436 bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))
437
438 if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
439 d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
440 if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
441 d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
442
443    # To add a recipe to the skip list, set:
444 # SKIP_RECIPE[pn] = "message"
445 pn = d.getVar('PN')
446 skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
447 if skip_msg:
448 bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
449 raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))
450
451 # Handle PACKAGECONFIG
452 #
453 # These take the form:
454 #
455 # PACKAGECONFIG ??= "<default options>"
456 # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
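    # For example, a hypothetical recipe could use:
    #   PACKAGECONFIG ??= "ssl"
    #   PACKAGECONFIG[ssl] = "--with-ssl,--without-ssl,openssl"
    # so that enabling "ssl" appends --with-ssl to PACKAGECONFIG_CONFARGS and
    # openssl to DEPENDS, while disabling it appends --without-ssl instead.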
457 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
458 if pkgconfigflags:
459 pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
460 pn = d.getVar("PN")
461
462 mlprefix = d.getVar("MLPREFIX")
463
464 def expandFilter(appends, extension, prefix):
465 appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
466 newappends = []
467 for a in appends:
468 if a.endswith("-native") or ("-cross-" in a):
469 newappends.append(a)
470 elif a.startswith("virtual/"):
471 subs = a.split("/", 1)[1]
472 if subs.startswith(prefix):
473 newappends.append(a + extension)
474 else:
475 newappends.append("virtual/" + prefix + subs + extension)
476 else:
477 if a.startswith(prefix):
478 newappends.append(a + extension)
479 else:
480 newappends.append(prefix + a + extension)
481 return newappends
482
483 def appendVar(varname, appends):
484 if not appends:
485 return
486 if varname.find("DEPENDS") != -1:
487 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
488 appends = expandFilter(appends, "", "nativesdk-")
489 elif bb.data.inherits_class('native', d):
490 appends = expandFilter(appends, "-native", "")
491 elif mlprefix:
492 appends = expandFilter(appends, "", mlprefix)
493 varname = d.expand(varname)
494 d.appendVar(varname, " " + " ".join(appends))
495
496 extradeps = []
497 extrardeps = []
498 extrarrecs = []
499 extraconf = []
500 for flag, flagval in sorted(pkgconfigflags.items()):
501 items = flagval.split(",")
502 num = len(items)
503 if num > 6:
504 bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
505 % (d.getVar('PN'), flag))
506
507 if flag in pkgconfig:
508 if num >= 3 and items[2]:
509 extradeps.append(items[2])
510 if num >= 4 and items[3]:
511 extrardeps.append(items[3])
512 if num >= 5 and items[4]:
513 extrarrecs.append(items[4])
514 if num >= 1 and items[0]:
515 extraconf.append(items[0])
516 elif num >= 2 and items[1]:
517 extraconf.append(items[1])
518
519 if num >= 6 and items[5]:
520 conflicts = set(items[5].split())
521 invalid = conflicts.difference(set(pkgconfigflags.keys()))
522 if invalid:
523 bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
524 % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))
525
526 if flag in pkgconfig:
527 intersec = conflicts.intersection(set(pkgconfig))
528 if intersec:
529 bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
530 % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
531
532 appendVar('DEPENDS', extradeps)
533 appendVar('RDEPENDS:${PN}', extrardeps)
534 appendVar('RRECOMMENDS:${PN}', extrarrecs)
535 appendVar('PACKAGECONFIG_CONFARGS', extraconf)
536
537 pn = d.getVar('PN')
538 license = d.getVar('LICENSE')
539 if license == "INVALID" and pn != "defaultpkgname":
540 bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
541
542 if bb.data.inherits_class('license', d):
543 check_license_format(d)
544 unmatched_license_flags = check_license_flags(d)
545 if unmatched_license_flags:
546 if len(unmatched_license_flags) == 1:
547                message = "because it has a restricted license '{0}', which is not listed in LICENSE_FLAGS_ACCEPTED".format(unmatched_license_flags[0])
548 else:
549                message = "because it has restricted licenses {0}, which are not listed in LICENSE_FLAGS_ACCEPTED".format(
550                    ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
551 bb.debug(1, "Skipping %s %s" % (pn, message))
552 raise bb.parse.SkipRecipe(message)
553
554 # If we're building a target package we need to use fakeroot (pseudo)
555 # in order to capture permissions, owners, groups and special files
556 if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
557 d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
558 d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
559 d.setVarFlag('do_install', 'fakeroot', '1')
560 d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
561 d.setVarFlag('do_package', 'fakeroot', '1')
562 d.setVarFlag('do_package_setscene', 'fakeroot', '1')
563 d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
564 d.setVarFlag('do_devshell', 'fakeroot', '1')
565 d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
566
567 need_machine = d.getVar('COMPATIBLE_MACHINE')
568 if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
569 import re
570 compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
571 for m in compat_machines:
572 if re.match(need_machine, m):
573 break
574 else:
575 raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
576
577 source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
578 if not source_mirror_fetch:
579 need_host = d.getVar('COMPATIBLE_HOST')
580 if need_host:
581 import re
582 this_host = d.getVar('HOST_SYS')
583 if not re.match(need_host, this_host):
584 raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
585
586 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
587
588    check_license = not pn.startswith("nativesdk-")
589 for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
590 "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
591 "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
592 if pn.endswith(d.expand(t)):
593 check_license = False
594 if pn.startswith("gcc-source-"):
595 check_license = False
596
597 if check_license and bad_licenses:
598 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
599
600 exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
601
602 for lic_exception in exceptions:
603 if ":" in lic_exception:
604 lic_exception = lic_exception.split(":")[1]
605 if lic_exception in oe.license.obsolete_license_list():
606 bb.fatal("Obsolete license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
607
608 pkgs = d.getVar('PACKAGES').split()
609 skipped_pkgs = {}
610 unskipped_pkgs = []
611 for pkg in pkgs:
612 remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
613
614 incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
615 if incompatible_lic:
616 skipped_pkgs[pkg] = incompatible_lic
617 else:
618 unskipped_pkgs.append(pkg)
619
620 if unskipped_pkgs:
621 for pkg in skipped_pkgs:
622 bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
623 d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
624 for pkg in unskipped_pkgs:
625 bb.debug(1, "Including the package %s" % pkg)
626 else:
627 incompatible_lic = incompatible_license(d, bad_licenses)
628 for pkg in skipped_pkgs:
629 incompatible_lic += skipped_pkgs[pkg]
630 incompatible_lic = sorted(list(set(incompatible_lic)))
631
632 if incompatible_lic:
633 bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
634 raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
635
636 needsrcrev = False
637 srcuri = d.getVar('SRC_URI')
638 for uri_string in srcuri.split():
639 uri = bb.fetch.URI(uri_string)
640 # Also check downloadfilename as the URL path might not be useful for sniffing
641 path = uri.params.get("downloadfilename", uri.path)
642
643 # HTTP/FTP use the wget fetcher
644 if uri.scheme in ("http", "https", "ftp"):
645 d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
646
647 # Svn packages should DEPEND on subversion-native
648 if uri.scheme == "svn":
649 needsrcrev = True
650 d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
651
652 # Git packages should DEPEND on git-native
653 elif uri.scheme in ("git", "gitsm"):
654 needsrcrev = True
655 d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
656
657 # Mercurial packages should DEPEND on mercurial-native
658 elif uri.scheme == "hg":
659 needsrcrev = True
660 d.appendVar("EXTRANATIVEPATH", ' python3-native ')
661 d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
662
663 # Perforce packages support SRCREV = "${AUTOREV}"
664 elif uri.scheme == "p4":
665 needsrcrev = True
666
667 # OSC packages should DEPEND on osc-native
668 elif uri.scheme == "osc":
669 d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
670
671 elif uri.scheme == "npm":
672 d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
673
674 elif uri.scheme == "repo":
675 needsrcrev = True
676 d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')
677
678 # *.lz4 should DEPEND on lz4-native for unpacking
679 if path.endswith('.lz4'):
680 d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
681
682 # *.zst should DEPEND on zstd-native for unpacking
683 elif path.endswith('.zst'):
684 d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')
685
686 # *.lz should DEPEND on lzip-native for unpacking
687 elif path.endswith('.lz'):
688 d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
689
690 # *.xz should DEPEND on xz-native for unpacking
691 elif path.endswith('.xz') or path.endswith('.txz'):
692 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
693
694 # .zip should DEPEND on unzip-native for unpacking
695 elif path.endswith('.zip') or path.endswith('.jar'):
696 d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
697
698 # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
699 elif path.endswith('.rpm'):
700 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
701
702 # *.deb should DEPEND on xz-native for unpacking
703 elif path.endswith('.deb'):
704 d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
705
706 if needsrcrev:
707 d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
708
709 # Gather all named SRCREVs to add to the sstate hash calculation
710 # This anonymous python snippet is called multiple times so we
711 # need to be careful to not double up the appends here and cause
712 # the base hash to mismatch the task hash
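    # e.g. a SRC_URI entry "git://host/repo.git;branch=main;name=rt" pairs
    # with a SRCREV_rt value, which must then influence the do_fetch hash.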
713 for uri in srcuri.split():
714 parm = bb.fetch.decodeurl(uri)[5]
715 uri_names = parm.get("name", "").split(",")
716 for uri_name in filter(None, uri_names):
717 srcrev_name = "SRCREV_{}".format(uri_name)
718 if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
719 d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
720
721 set_packagetriplet(d)
722
723 # 'multimachine' handling
724 mach_arch = d.getVar('MACHINE_ARCH')
725 pkg_arch = d.getVar('PACKAGE_ARCH')
726
727 if (pkg_arch == mach_arch):
728 # Already machine specific - nothing further to do
729 return
730
731 #
732 # We always try to scan SRC_URI for urls with machine overrides
733 # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
734 #
735 override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
736 if override != '0':
737 paths = []
738 fpaths = (d.getVar('FILESPATH') or '').split(':')
739 machine = d.getVar('MACHINE')
740 for p in fpaths:
741 if os.path.basename(p) == machine and os.path.isdir(p):
742 paths.append(p)
743
744 if paths:
745 for s in srcuri.split():
746 if not s.startswith("file://"):
747 continue
748 fetcher = bb.fetch2.Fetch([s], d)
749 local = fetcher.localpath(s)
750 for mp in paths:
751 if local.startswith(mp):
752 #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
753 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
754 return
755
756 packages = d.getVar('PACKAGES').split()
757 for pkg in packages:
758 pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)
759
760 # We could look for != PACKAGE_ARCH here but how to choose
761 # if multiple differences are present?
762 # Look through PACKAGE_ARCHS for the priority order?
763 if pkgarch and pkgarch == mach_arch:
764 d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
765 bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
766}
767
768addtask cleansstate after do_clean
769python do_cleansstate() {
770 sstate_clean_cachefiles(d)
771}
772addtask cleanall after do_cleansstate
773do_cleansstate[nostamp] = "1"
774
775python do_cleanall() {
776 src_uri = (d.getVar('SRC_URI') or "").split()
777 if not src_uri:
778 return
779
780 try:
781 fetcher = bb.fetch2.Fetch(src_uri, d)
782 fetcher.clean()
783 except bb.fetch2.BBFetchException as e:
784 bb.fatal(str(e))
785}
786do_cleanall[nostamp] = "1"
787
788
789EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
diff --git a/meta/classes-global/buildstats.bbclass b/meta/classes-global/buildstats.bbclass
new file mode 100644
index 0000000000..f49a67aa4f
--- /dev/null
+++ b/meta/classes-global/buildstats.bbclass
@@ -0,0 +1,302 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
8
9################################################################################
10# Build statistics gathering.
11#
12# The CPU and Time gathering/tracking functions and bbevent inspiration
13# were written by Christopher Larson.
14#
15################################################################################
16
17def get_buildprocess_cputime(pid):
18 with open("/proc/%d/stat" % pid, "r") as f:
19 fields = f.readline().rstrip().split()
20    # 13: utime, 14: stime, 15: cutime (field 16, cstime, is not part of the sum below)
21 return sum(int(field) for field in fields[13:16])
22
23def get_process_cputime(pid):
24 import resource
25 with open("/proc/%d/stat" % pid, "r") as f:
26 fields = f.readline().rstrip().split()
27 stats = {
28 'utime' : fields[13],
29 'stime' : fields[14],
30 'cutime' : fields[15],
31 'cstime' : fields[16],
32 }
33 iostats = {}
34 if os.path.isfile("/proc/%d/io" % pid):
35 with open("/proc/%d/io" % pid, "r") as f:
36 while True:
37 i = f.readline().strip()
38 if not i:
39 break
40                if ":" not in i:
41 # one more extra line is appended (empty or containing "0")
42                    # most probably due to a race condition in the kernel while
43 # updating IO stats
44 break
45 i = i.split(": ")
46 iostats[i[0]] = i[1]
47 resources = resource.getrusage(resource.RUSAGE_SELF)
48 childres = resource.getrusage(resource.RUSAGE_CHILDREN)
49 return stats, iostats, resources, childres
50
51def get_cputime():
52 with open("/proc/stat", "r") as f:
53 fields = f.readline().rstrip().split()[1:]
54 return sum(int(field) for field in fields)
55
56def set_timedata(var, d, server_time):
57 d.setVar(var, server_time)
58
59def get_timedata(var, d, end_time):
60 oldtime = d.getVar(var, False)
61 if oldtime is None:
62 return
63 return end_time - oldtime
64
65def set_buildtimedata(var, d):
66 import time
67 time = time.time()
68 cputime = get_cputime()
69 proctime = get_buildprocess_cputime(os.getpid())
70 d.setVar(var, (time, cputime, proctime))
71
72def get_buildtimedata(var, d):
73 import time
74 timedata = d.getVar(var, False)
75 if timedata is None:
76 return
77 oldtime, oldcpu, oldproc = timedata
78 procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
79 cpudiff = get_cputime() - oldcpu
80 end_time = time.time()
81 timediff = end_time - oldtime
82 if cpudiff > 0:
83 cpuperc = float(procdiff) * 100 / cpudiff
84 else:
85 cpuperc = None
86 return timediff, cpuperc
87
88def write_task_data(status, logfile, e, d):
89 with open(os.path.join(logfile), "a") as f:
90 elapsedtime = get_timedata("__timedata_task", d, e.time)
91 if elapsedtime:
92 f.write(d.expand("${PF}: %s\n" % e.task))
93 f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
94 cpu, iostats, resources, childres = get_process_cputime(os.getpid())
95 if cpu:
96 f.write("utime: %s\n" % cpu['utime'])
97 f.write("stime: %s\n" % cpu['stime'])
98 f.write("cutime: %s\n" % cpu['cutime'])
99 f.write("cstime: %s\n" % cpu['cstime'])
100 for i in iostats:
101 f.write("IO %s: %s\n" % (i, iostats[i]))
102 rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
103 for i in rusages:
104 f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
105 for i in rusages:
106 f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
107 if status == "passed":
108 f.write("Status: PASSED \n")
109 else:
110 f.write("Status: FAILED \n")
111 f.write("Ended: %0.2f \n" % e.time)
112
113def write_host_data(logfile, e, d, type):
114 import subprocess, os, datetime
115 # minimum time allowed for each command to run, in seconds
116 time_threshold = 0.5
117 limit = 10
118 # the total number of commands
119 num_cmds = 0
120 msg = ""
121 if type == "interval":
122 # interval at which data will be logged
123 interval = d.getVar("BB_HEARTBEAT_EVENT", False)
124 if interval is None:
125 bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
126 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
127 return
128 interval = int(interval)
129 cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
130 msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
131 if cmds is None:
132 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
133 bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
134 return
135 if type == "failure":
136 cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
137 msg = "Host Stats: Collecting data on failure.\n"
138 msg += "Failed at task: " + e.task + "\n"
139 if cmds is None:
140 d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
141 bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
142 return
143 c_san = []
144 for cmd in cmds.split(";"):
145 if len(cmd) == 0:
146 continue
147 num_cmds += 1
148 c_san.append(cmd)
149 if num_cmds == 0:
150 if type == "interval":
151 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
152 if type == "failure":
153 d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
154 return
155
156 # return if the interval is not enough to run all commands within the specified BB_HEARTBEAT_EVENT interval
157 if type == "interval":
158 limit = interval / num_cmds
159 if limit <= time_threshold:
160 d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
161 bb.warn("buildstats: Collecting host data failed. BB_HEARTBEAT_EVENT interval not enough to run the specified commands. Increase value of BB_HEARTBEAT_EVENT in conf/local.conf.")
162 return
163
164 # set the environment variables
165 path = d.getVar("PATH")
166 opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
167 ospath = os.environ['PATH']
168 os.environ['PATH'] = path + ":" + opath + ":" + ospath
169 with open(logfile, "a") as f:
170 f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
171 f.write("%s" % msg)
172 for c in c_san:
173 try:
174 output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
175 except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
176 output = "Error running command: %s\n%s\n" % (c, err)
177 f.write("%s\n%s\n" % (c, output))
178 # reset the environment
179 os.environ['PATH'] = ospath
180
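# A minimal local.conf sketch enabling interval collection (values illustrative):
#   BB_HEARTBEAT_EVENT = "10"
#   BB_LOG_HOST_STAT_ON_INTERVAL = "1"
#   BB_LOG_HOST_STAT_CMDS_INTERVAL = "uptime; free -m"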
181python run_buildstats () {
182 import bb.build
183 import bb.event
184 import time, subprocess, platform
185
186 bn = d.getVar('BUILDNAME')
187 ########################################################################
188 # bitbake fires HeartbeatEvent even before a build has been
189 # triggered, causing BUILDNAME to be None
190 ########################################################################
191 if bn is not None:
192 bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
193 taskdir = os.path.join(bsdir, d.getVar('PF'))
194 if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
195 bb.utils.mkdirhier(bsdir)
196 write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")
197
198 if isinstance(e, bb.event.BuildStarted):
199 ########################################################################
200 # If the kernel was not configured to provide I/O statistics, issue
201 # a one time warning.
202 ########################################################################
203 if not os.path.isfile("/proc/%d/io" % os.getpid()):
204 bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
205
206 ########################################################################
207 # at first pass make the buildstats hierarchy and then
208 # set the buildname
209 ########################################################################
210 bb.utils.mkdirhier(bsdir)
211 set_buildtimedata("__timedata_build", d)
212 build_time = os.path.join(bsdir, "build_stats")
213 # write start of build into build_time
214 with open(build_time, "a") as f:
215 host_info = platform.uname()
216 f.write("Host Info: ")
217 for x in host_info:
218 if x:
219 f.write(x + " ")
220 f.write("\n")
221 f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
222
223 elif isinstance(e, bb.event.BuildCompleted):
224 build_time = os.path.join(bsdir, "build_stats")
225 with open(build_time, "a") as f:
226 ########################################################################
227 # Write build statistics for the build
228 ########################################################################
229 timedata = get_buildtimedata("__timedata_build", d)
230 if timedata:
231 time, cpu = timedata
232 # write end of build and cpu used into build_time
233 f.write("Elapsed time: %0.2f seconds \n" % (time))
234 if cpu:
235 f.write("CPU usage: %0.1f%% \n" % cpu)
236
237 if isinstance(e, bb.build.TaskStarted):
238 set_timedata("__timedata_task", d, e.time)
239 bb.utils.mkdirhier(taskdir)
240 # write into the task event file the name and start time
241 with open(os.path.join(taskdir, e.task), "a") as f:
242 f.write("Event: %s \n" % bb.event.getName(e))
243 f.write("Started: %0.2f \n" % e.time)
244
245 elif isinstance(e, bb.build.TaskSucceeded):
246 write_task_data("passed", os.path.join(taskdir, e.task), e, d)
247 if e.task == "do_rootfs":
248 bs = os.path.join(bsdir, "build_stats")
249 with open(bs, "a") as f:
250 rootfs = d.getVar('IMAGE_ROOTFS')
251 if os.path.isdir(rootfs):
252 try:
253 rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
254 stderr=subprocess.STDOUT).decode('utf-8')
255 f.write("Uncompressed Rootfs size: %s" % rootfs_size)
256 except subprocess.CalledProcessError as err:
257 bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
258
259 elif isinstance(e, bb.build.TaskFailed):
260 # Can have a failure before TaskStarted so need to mkdir here too
261 bb.utils.mkdirhier(taskdir)
262 write_task_data("failed", os.path.join(taskdir, e.task), e, d)
263 ########################################################################
264            # Let's make things easier and tell people where the build failed in
265 # build_status. We do this here because BuildCompleted triggers no
266 # matter what the status of the build actually is
267 ########################################################################
268 build_status = os.path.join(bsdir, "build_stats")
269 with open(build_status, "a") as f:
270 f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
271 if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
272 write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
273}
274
275addhandler run_buildstats
276run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
277
278python runqueue_stats () {
279 import buildstats
280 from bb import event, runqueue
281 # We should not record any samples before the first task has started,
282 # because that's the first activity shown in the process chart.
283 # Besides, at that point we are sure that the build variables
284 # are available that we need to find the output directory.
285 # The persistent SystemStats is stored in the datastore and
286 # closed when the build is done.
287 system_stats = d.getVar('_buildstats_system_stats', False)
288 if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
289 system_stats = buildstats.SystemStats(d)
290 d.setVar('_buildstats_system_stats', system_stats)
291 if system_stats:
292 # Ensure that we sample at important events.
293 done = isinstance(e, bb.event.BuildCompleted)
294 if system_stats.sample(e, force=done):
295 d.setVar('_buildstats_system_stats', system_stats)
296 if done:
297 system_stats.close()
298 d.delVar('_buildstats_system_stats')
299}
300
301addhandler runqueue_stats
302runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
diff --git a/meta/classes-global/debian.bbclass b/meta/classes-global/debian.bbclass
new file mode 100644
index 0000000000..7135d74837
--- /dev/null
+++ b/meta/classes-global/debian.bbclass
@@ -0,0 +1,156 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Debian package renaming only occurs when a package is built
8# We therefore have to make sure we build all runtime packages
9# before building the current package to make the packages runtime
10# depends are correct
11#
12# Custom library package names can be defined setting
13# DEBIANNAME: + pkgname to the desired name.
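# (illustratively: DEBIANNAME:${PN} = "libfoo2")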
14#
15# Better expressed as: ensure all RDEPENDS are packaged before we package.
16# This means we can't have circular RDEPENDS/RRECOMMENDS
17
18AUTO_LIBNAME_PKGS = "${PACKAGES}"
19
20inherit package
21
22DEBIANRDEP = "do_packagedata"
23do_package_write_ipk[deptask] = "${DEBIANRDEP}"
24do_package_write_deb[deptask] = "${DEBIANRDEP}"
25do_package_write_tar[deptask] = "${DEBIANRDEP}"
26do_package_write_rpm[deptask] = "${DEBIANRDEP}"
27do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
28do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
29do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
30do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
31
32python () {
33 if not d.getVar("PACKAGES"):
34 d.setVar("DEBIANRDEP", "")
35}
36
37python debian_package_name_hook () {
38 import glob, copy, stat, errno, re, pathlib, subprocess
39
40 pkgdest = d.getVar("PKGDEST")
41 packages = d.getVar('PACKAGES')
42 so_re = re.compile(r"lib.*\.so")
43
44 def socrunch(s):
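        # Worked examples (illustrative): "libtic.so.5" -> ("libtic5", "libtic"),
        # "libfoo2.so.1" -> ("libfoo2-1", "libfoo2"); returns None when the
        # name is not of the form lib*.so.*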
45 s = s.lower().replace('_', '-')
46 m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
47 if m is None:
48 return None
49 if m.group(2) in '0123456789':
50 bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
51 else:
52 bin = m.group(1) + m.group(2) + m.group(3)
53 dev = m.group(1) + m.group(2)
54 return (bin, dev)
55
56 def isexec(path):
57 try:
58 s = os.stat(path)
59 except (os.error, AttributeError):
60 return 0
61 return (s[stat.ST_MODE] & stat.S_IEXEC)
62
63 def add_rprovides(pkg, d):
64 newpkg = d.getVar('PKG:' + pkg)
65 if newpkg and newpkg != pkg:
66 provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
67 if pkg not in provs:
68 d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
69
70 def auto_libname(packages, orig_pkg):
71 p = lambda var: pathlib.PurePath(d.getVar(var))
72 libdirs = (p("base_libdir"), p("libdir"))
73 bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
74
75 sonames = []
76 has_bins = 0
77 has_libs = 0
78 for f in pkgfiles[orig_pkg]:
79 # This is .../packages-split/orig_pkg/
80 pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
81 # Strip pkgpath off the full path to a file in the package, re-root
82 # so it is absolute, and then get the parent directory of the file.
83 path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
84 if path in bindirs:
85 has_bins = 1
86 if path in libdirs:
87 has_libs = 1
88 if so_re.match(os.path.basename(f)):
89 try:
90 cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
91 output = subprocess.check_output(cmd).decode("utf-8")
92 for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
93 if m.group(1) not in sonames:
94 sonames.append(m.group(1))
95 except subprocess.CalledProcessError:
96 pass
97 bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
98 soname = None
99 if len(sonames) == 1:
100 soname = sonames[0]
101 elif len(sonames) > 1:
102 lead = d.getVar('LEAD_SONAME')
103 if lead:
104 r = re.compile(lead)
105 filtered = []
106 for s in sonames:
107 if r.match(s):
108 filtered.append(s)
109 if len(filtered) == 1:
110 soname = filtered[0]
111 elif len(filtered) > 1:
112 bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
113 else:
114 bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
115 else:
116 bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
117
118 if has_libs and not has_bins and soname:
119 soname_result = socrunch(soname)
120 if soname_result:
121 (pkgname, devname) = soname_result
122 for pkg in packages.split():
123 if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
124 add_rprovides(pkg, d)
125 continue
126 debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
127 if debian_pn:
128 newpkg = debian_pn
129 elif pkg == orig_pkg:
130 newpkg = pkgname
131 else:
132 newpkg = pkg.replace(orig_pkg, devname, 1)
133                    mlpre = d.getVar('MLPREFIX')
134                    if mlpre:
135                        if not newpkg.startswith(mlpre):
136                            newpkg = mlpre + newpkg
137 if newpkg != pkg:
138 bb.note("debian: renaming %s to %s" % (pkg, newpkg))
139 d.setVar('PKG:' + pkg, newpkg)
140 add_rprovides(pkg, d)
141 else:
142 add_rprovides(orig_pkg, d)
143
144 # reversed sort is needed when some package is substring of another
145 # ie in ncurses we get without reverse sort:
146 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
147 # and later
148 # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
149 # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
150 for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
151 auto_libname(packages, pkg)
152}
153
154EXPORT_FUNCTIONS package_name_hook
155
156DEBIAN_NAMES = "1"
diff --git a/meta/classes-global/devshell.bbclass b/meta/classes-global/devshell.bbclass
new file mode 100644
index 0000000000..03af56b7a2
--- /dev/null
+++ b/meta/classes-global/devshell.bbclass
@@ -0,0 +1,166 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit terminal
8
9DEVSHELL = "${SHELL}"
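# The spawned shell can be overridden, e.g. in local.conf (illustrative):
#   DEVSHELL = "/bin/zsh"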
10
11PATH:prepend:task-devshell = "${COREBASE}/scripts/git-intercept:"
12
13python do_devshell () {
14 if d.getVarFlag("do_devshell", "manualfakeroot"):
15 d.prependVar("DEVSHELL", "pseudo ")
16 fakeenv = d.getVar("FAKEROOTENV").split()
17 for f in fakeenv:
18            k = f.split("=", 1)  # split on the first '=' only so values may contain '='
19 d.setVar(k[0], k[1])
20 d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
21 d.delVarFlag("do_devshell", "fakeroot")
22
23 oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
24}
25
26addtask devshell after do_patch do_prepare_recipe_sysroot
27
28# The directory that the terminal starts in
29DEVSHELL_STARTDIR ?= "${S}"
30do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
31do_devshell[nostamp] = "1"
32do_devshell[network] = "1"
33
34# devshell and fakeroot/pseudo need careful handling since only the final
35# command should run under fakeroot emulation, any X connection should
36# be done as the normal user. We therfore carefully construct the envionment
37# manually
38python () {
39 if d.getVarFlag("do_devshell", "fakeroot"):
40 # We need to signal our code that we want fakeroot however we
41 # can't manipulate the environment and variables here yet (see YOCTO #4795)
42 d.setVarFlag("do_devshell", "manualfakeroot", "1")
43 d.delVarFlag("do_devshell", "fakeroot")
44}
45
46def pydevshell(d):
47
48 import code
49 import select
50 import signal
51 import termios
52
53 m, s = os.openpty()
54 sname = os.ttyname(s)
55
56 def noechoicanon(fd):
57 old = termios.tcgetattr(fd)
58 old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
59 # &~ termios.ISIG
60 termios.tcsetattr(fd, termios.TCSADRAIN, old)
61
62 # No echo or buffering over the pty
63 noechoicanon(s)
64
65 pid = os.fork()
66 if pid:
67 os.close(m)
68 oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
69 os._exit(0)
70 else:
71 os.close(s)
72
73 os.dup2(m, sys.stdin.fileno())
74 os.dup2(m, sys.stdout.fileno())
75 os.dup2(m, sys.stderr.fileno())
76
77 bb.utils.nonblockingfd(sys.stdout)
78 bb.utils.nonblockingfd(sys.stderr)
79 bb.utils.nonblockingfd(sys.stdin)
80
81 _context = {
82 "os": os,
83 "bb": bb,
84 "time": time,
85 "d": d,
86 }
87
88 ps1 = "pydevshell> "
89 ps2 = "... "
90 buf = []
91 more = False
92
93 i = code.InteractiveInterpreter(locals=_context)
94 print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
95
96 def prompt(more):
97 if more:
98 prompt = ps2
99 else:
100 prompt = ps1
101 sys.stdout.write(prompt)
102 sys.stdout.flush()
103
104 # Restore Ctrl+C since bitbake masks this
105 def signal_handler(signal, frame):
106 raise KeyboardInterrupt
107 signal.signal(signal.SIGINT, signal_handler)
108
109 child = None
110
111 prompt(more)
112 while True:
113 try:
114 try:
115 (r, _, _) = select.select([sys.stdin], [], [], 1)
116 if not r:
117 continue
118 line = sys.stdin.readline().strip()
119 if not line:
120 prompt(more)
121 continue
122 except EOFError as e:
123 sys.stdout.write("\n")
124 sys.stdout.flush()
125 except (OSError, IOError) as e:
126 if e.errno == 11:
127 continue
128 if e.errno == 5:
129 return
130 raise
131 else:
132 if not child:
133 child = int(line)
134 continue
135 buf.append(line)
136 source = "\n".join(buf)
137 more = i.runsource(source, "<pyshell>")
138 if not more:
139 buf = []
140 sys.stderr.flush()
141 prompt(more)
142 except KeyboardInterrupt:
143 i.write("\nKeyboardInterrupt\n")
144 buf = []
145 more = False
146 prompt(more)
147 except SystemExit:
148 # Easiest way to ensure everything exits
149 os.kill(child, signal.SIGTERM)
150 break
151
152python do_pydevshell() {
153 import signal
154
155 try:
156 pydevshell(d)
157 except SystemExit:
158 # Stop the SIGTERM above causing an error exit code
159 return
160 finally:
161 return
162}
163addtask pydevshell after do_patch
164
165do_pydevshell[nostamp] = "1"
166do_pydevshell[network] = "1"
diff --git a/meta/classes-global/insane.bbclass b/meta/classes-global/insane.bbclass
new file mode 100644
index 0000000000..46ea41e271
--- /dev/null
+++ b/meta/classes-global/insane.bbclass
@@ -0,0 +1,1453 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# BB Class inspired by ebuild.sh
8#
9# This class will test files after installation for certain
10# security issues and other kinds of issues.
11#
12# Checks we do:
13# -Check the ownership and permissions
14# -Check the RUNTIME path for the $TMPDIR
15# -Check if .la files wrongly point to workdir
16# -Check if .pc files wrongly point to workdir
17# -Check if packages contain .debug directories or .so files
18# where they should be in -dev or -dbg
19# -Check if config.log contains traces to broken autoconf tests
20# -Check invalid characters (non-utf8) on some package metadata
21# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
22# into exec_prefix
23# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
24# files under exec_prefix
25# -Check if the package name is upper case
26
27# Elect whether a given type of error is a warning or an error; these
28# values may have been set by other files.
29WARN_QA ?= " libdir xorg-driver-abi buildpaths \
30 textrel incompatible-license files-invalid \
31 infodir build-deps src-uri-bad symlink-to-sysroot multilib \
32 invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
33 mime mime-xdg unlisted-pkg-lics unhandled-features-check \
34 missing-update-alternatives native-last missing-ptest \
35 license-exists license-no-generic license-syntax license-format \
36 license-incompatible license-file-missing obsolete-license \
37 "
38ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
39 perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
40 split-strip packages-list pkgv-undefined var-undefined \
41 version-going-backwards expanded-d invalid-chars \
42 license-checksum dev-elf file-rdeps configure-unsafe \
43 configure-gettext perllocalpod shebang-size \
44 already-stripped installed-vs-shipped ldflags compile-host-path \
45 install-host-path pn-overrides unknown-configure-option \
46 useless-rpaths rpaths staticdev empty-dirs \
47 "
48# Add usrmerge QA check based on distro feature
49ERROR_QA:append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
50
51FAKEROOT_QA = "host-user-contaminated"
52FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
53enabled tests are listed here, the do_package_qa task will run under fakeroot."
54
55ALL_QA = "${WARN_QA} ${ERROR_QA}"
56
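# A distro or recipe can re-classify or disable a check, e.g. (illustrative):
#   ERROR_QA:remove = "ldflags"
#   WARN_QA:append = " ldflags"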
57UNKNOWN_CONFIGURE_OPT_IGNORE ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
58
59# This is a list of directories that are expected to be empty.
60QA_EMPTY_DIRS ?= " \
61 /dev/pts \
62 /media \
63 /proc \
64 /run \
65 /tmp \
66 ${localstatedir}/run \
67 ${localstatedir}/volatile \
68"
69# It is possible to specify why a directory is expected to be empty by defining
70# QA_EMPTY_DIRS_RECOMMENDATION:<path>, which will then be included in the error
71# message if the directory is not empty. If it is not specified for a directory,
72# then "but it is expected to be empty" will be used.
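# For example (illustrative):
#   QA_EMPTY_DIRS_RECOMMENDATION:/run = "but it is expected to be empty because it is mounted volatile at runtime"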
73
74def package_qa_clean_path(path, d, pkg=None):
75 """
76 Remove redundant paths from the path for display. If pkg isn't set then
77 TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
78 """
79 if pkg:
80 path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
81 return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
82
83QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
84def package_qa_check_shebang_size(path, name, d, elf, messages):
85 import stat
86 if os.path.islink(path) or stat.S_ISFIFO(os.stat(path).st_mode) or elf:
87 return
88
89 try:
90 with open(path, 'rb') as f:
91 stanza = f.readline(130)
92 except IOError:
93 return
94
95 if stanza.startswith(b'#!'):
96        # A shebang was found; check that it decodes as text and fits the size limit
97 try:
98 stanza = stanza.decode("utf-8")
99 except UnicodeDecodeError:
100 #If it is not a text file, it is not a script
101 return
102
103 if len(stanza) > 129:
104 oe.qa.add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
105 return
106
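# Illustrative only: any of these checks can be suppressed per package from a
# recipe via INSANE_SKIP, e.g. for a package that knowingly ships a long
# interpreter line:
#
#   INSANE_SKIP:${PN} += "shebang-size"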
107QAPATHTEST[libexec] = "package_qa_check_libexec"
108def package_qa_check_libexec(path, name, d, elf, messages):
109
110 # Skip the case where the default is explicitly /usr/libexec
111 libexec = d.getVar('libexecdir')
112 if libexec == "/usr/libexec":
113 return True
114
115 if 'libexec' in path.split(os.path.sep):
116 oe.qa.add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
117 return False
118
119 return True
120
121QAPATHTEST[rpaths] = "package_qa_check_rpath"
122def package_qa_check_rpath(file, name, d, elf, messages):
123 """
124 Check for dangerous RPATHs
125 """
126 if not elf:
127 return
128
129 if os.path.islink(file):
130 return
131
132 bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
133
134 phdrs = elf.run_objdump("-p", d)
135
136 import re
137 rpath_re = re.compile(r"\s+RPATH\s+(.*)")
138 for line in phdrs.split("\n"):
139 m = rpath_re.match(line)
140 if m:
141 rpath = m.group(1)
142 for dir in bad_dirs:
143 if dir in rpath:
144 oe.qa.add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
145
146QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
147def package_qa_check_useless_rpaths(file, name, d, elf, messages):
148 """
149 Check for RPATHs that are useless but not dangerous
150 """
151 def rpath_eq(a, b):
152 return os.path.normpath(a) == os.path.normpath(b)
153
154 if not elf:
155 return
156
157 if os.path.islink(file):
158 return
159
160 libdir = d.getVar("libdir")
161 base_libdir = d.getVar("base_libdir")
162
163 phdrs = elf.run_objdump("-p", d)
164
165 import re
166 rpath_re = re.compile(r"\s+RPATH\s+(.*)")
167 for line in phdrs.split("\n"):
168 m = rpath_re.match(line)
169 if m:
170 rpath = m.group(1)
171 if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
172 # The dynamic linker searches both these places anyway. There is no point in
173 # looking there again.
174 oe.qa.add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
175
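# For orientation: both RPATH checks parse the output of `objdump -p`, where a
# dynamic-section entry looks roughly like this (illustrative):
#
#   RPATH                /usr/lib
#
# The check above flags an RPATH equal to libdir/base_libdir as redundant,
# since the dynamic linker searches those directories anyway.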
176QAPATHTEST[dev-so] = "package_qa_check_dev"
177def package_qa_check_dev(path, name, d, elf, messages):
178 """
179 Check for ".so" library symlinks in non-dev packages
180 """
181
182 if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
183 oe.qa.add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
184 (name, package_qa_clean_path(path, d, name)))
185
186QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
187def package_qa_check_dev_elf(path, name, d, elf, messages):
188 """
189 Check that -dev doesn't contain real shared libraries. The test has to
190 check that the file is not a link and is an ELF object as some recipes
191 install link-time .so files that are linker scripts.
192 """
193 if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
194 oe.qa.add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
195 (name, package_qa_clean_path(path, d, name)))
196
197QAPATHTEST[staticdev] = "package_qa_check_staticdev"
198def package_qa_check_staticdev(path, name, d, elf, messages):
199 """
200 Check for ".a" library in non-staticdev packages
201 There are a number of exceptions to this rule, -pic packages can contain
202 static libraries, the _nonshared.a belong with their -dev packages and
203 libgcc.a, libgcov.a will be skipped in their packages
204 """
205
206 if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
207 oe.qa.add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
208 (name, package_qa_clean_path(path,d, name)))
209
210QAPATHTEST[mime] = "package_qa_check_mime"
211def package_qa_check_mime(path, name, d, elf, messages):
212 """
213 Check if package installs mime types to /usr/share/mime/packages
214 while not inheriting mime.bbclass
215 """
216
217 if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
218 oe.qa.add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
219 (name, package_qa_clean_path(path,d)))
220
221QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
222def package_qa_check_mime_xdg(path, name, d, elf, messages):
223 """
224 Check if package installs a desktop file containing MimeType and requires
225 mime-xdg.bbclass to create /usr/share/applications/mimeinfo.cache
226 """
227
228 if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
229 mime_type_found = False
230 try:
231 with open(path, 'r') as f:
232 for line in f.read().split('\n'):
233 if 'MimeType' in line:
234 mime_type_found = True
235 break
236 except:
237 # At least libreoffice installs symlinks with absolute paths that are dangling here.
238 # We could implement some magic but for few (one) recipes it is not worth the effort so just warn:
239 wstr = "%s cannot open %s - is it a symlink with absolute path?\n" % (name, package_qa_clean_path(path,d))
240 wstr += "Please check if (linked) file contains key 'MimeType'.\n"
241 pkgname = name
242 if name == d.getVar('PN'):
243 pkgname = '${PN}'
244 wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP:%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
245 oe.qa.add_message(messages, "mime-xdg", wstr)
246 if mime_type_found:
247 oe.qa.add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
248 (name, package_qa_clean_path(path,d)))
249
250def package_qa_check_libdir(d):
251 """
252 Check for wrong library installation paths. For instance, catch
253 recipes installing /lib/bar.so when ${base_libdir}="lib32" or
254 installing in /usr/lib64 when ${libdir}="/usr/lib"
255 """
256 import re
257
258 pkgdest = d.getVar('PKGDEST')
259 base_libdir = d.getVar("base_libdir") + os.sep
260 libdir = d.getVar("libdir") + os.sep
261 libexecdir = d.getVar("libexecdir") + os.sep
262 exec_prefix = d.getVar("exec_prefix") + os.sep
263
264 messages = []
265
266 # The re's are purposely fuzzy, as there are some .so.x.y.z files
267 # that don't follow the standard naming convention. It checks later
268 # that they are actual ELF files
269 lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
270 exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
271
272 for root, dirs, files in os.walk(pkgdest):
273 if root == pkgdest:
274 # Skip subdirectories for any packages with libdir in INSANE_SKIP
275 skippackages = []
276 for package in dirs:
277 if 'libdir' in (d.getVar('INSANE_SKIP:' + package) or "").split():
278 bb.note("Package %s skipping libdir QA test" % (package))
279 skippackages.append(package)
280 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
281 bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
282 skippackages.append(package)
283 for package in skippackages:
284 dirs.remove(package)
285 for file in files:
286 full_path = os.path.join(root, file)
287 rel_path = os.path.relpath(full_path, pkgdest)
288 if os.sep in rel_path:
289 package, rel_path = rel_path.split(os.sep, 1)
290 rel_path = os.sep + rel_path
291 if lib_re.match(rel_path):
292 if base_libdir not in rel_path:
293 # make sure it's an actual ELF file
294 elf = oe.qa.ELFFile(full_path)
295 try:
296 elf.open()
297 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
298 except (oe.qa.NotELFFileError):
299 pass
300 if exec_re.match(rel_path):
301 if libdir not in rel_path and libexecdir not in rel_path:
302 # make sure it's an actual ELF file
303 elf = oe.qa.ELFFile(full_path)
304 try:
305 elf.open()
306 messages.append("%s: found library in wrong location: %s" % (package, rel_path))
307 except (oe.qa.NotELFFileError):
308 pass
309
310 if messages:
311 oe.qa.handle_error("libdir", "\n".join(messages), d)
312
313QAPATHTEST[debug-files] = "package_qa_check_dbg"
314def package_qa_check_dbg(path, name, d, elf, messages):
315 """
316 Check for ".debug" files or directories outside of the dbg package
317 """
318
319 if not "-dbg" in name and not "-ptest" in name:
320 if '.debug' in path.split(os.path.sep):
321 oe.qa.add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
322 (name, package_qa_clean_path(path,d)))
323
324QAPATHTEST[arch] = "package_qa_check_arch"
325def package_qa_check_arch(path, name, d, elf, messages):
326 """
327 Check if archs are compatible
328 """
329 import re, oe.elf
330
331 if not elf:
332 return
333
334 target_os = d.getVar('HOST_OS')
335 target_arch = d.getVar('HOST_ARCH')
336 provides = d.getVar('PROVIDES')
337 bpn = d.getVar('BPN')
338
339 if target_arch == "allarch":
340 pn = d.getVar('PN')
341 oe.qa.add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
342 return
343
344 # FIXME: Cross packages confuse this check, so just skip them
345 for s in ['cross', 'nativesdk', 'cross-canadian']:
346 if bb.data.inherits_class(s, d):
347 return
348
349 # avoid following links to /usr/bin (e.g. on udev builds)
350 # we will check the files pointed to anyway...
351 if os.path.islink(path):
352 return
353
354 # If this throws an exception, then fix the dict in oe.elf.machine_dict
355 (machine, osabi, abiversion, littleendian, bits) \
356 = oe.elf.machine_dict(d)[target_os][target_arch]
357
358 # Check the architecture and endianness of the binary
359 is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
360 (target_os == "linux-gnux32" or target_os == "linux-muslx32" or \
361 target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
362 is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
363 if not ((machine == elf.machine()) or is_32 or is_bpf):
364 oe.qa.add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
365 (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
366 elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
367 oe.qa.add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
368 (elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
369 elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
370 oe.qa.add_message(messages, "arch", "Endiannes did not match (%d, expected %d) in %s" % \
371 (elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
372
373QAPATHTEST[desktop] = "package_qa_check_desktop"
374def package_qa_check_desktop(path, name, d, elf, messages):
375 """
376 Run all desktop files through desktop-file-validate.
377 """
378 if path.endswith(".desktop"):
379 desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
380 output = os.popen("%s %s" % (desktop_file_validate, path))
381 # This only produces output on errors
382 for l in output:
383 oe.qa.add_message(messages, "desktop", "Desktop file issue: " + l.strip())
384
385QAPATHTEST[textrel] = "package_qa_textrel"
386def package_qa_textrel(path, name, d, elf, messages):
387 """
388 Check if the binary contains relocations in .text
389 """
390
391 if not elf:
392 return
393
394 if os.path.islink(path):
395 return
396
397 phdrs = elf.run_objdump("-p", d)
398 sane = True
399
400 import re
401 textrel_re = re.compile(r"\s+TEXTREL\s+")
402 for line in phdrs.split("\n"):
403 if textrel_re.match(line):
404 sane = False
405 break
406
407 if not sane:
408 path = package_qa_clean_path(path, d, name)
409 oe.qa.add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
410
411QAPATHTEST[ldflags] = "package_qa_hash_style"
412def package_qa_hash_style(path, name, d, elf, messages):
413 """
414 Check if the binary has the right hash style...
415 """
416
417 if not elf:
418 return
419
420 if os.path.islink(path):
421 return
422
423 gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
424 if not gnu_hash:
425 gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
426 if not gnu_hash:
427 return
428
429 sane = False
430 has_syms = False
431
432 phdrs = elf.run_objdump("-p", d)
433
434 # If this binary has symbols, we expect it to have GNU_HASH too.
435 for line in phdrs.split("\n"):
436 if "SYMTAB" in line:
437 has_syms = True
438 if "GNU_HASH" in line or "MIPS_XHASH" in line:
439 sane = True
440 if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
441 sane = True
442 if has_syms and not sane:
443 path = package_qa_clean_path(path, d, name)
444 oe.qa.add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
445
446
447QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
448def package_qa_check_buildpaths(path, name, d, elf, messages):
449 """
450 Check for build paths inside target files and error if paths are not
451 explicitly ignored.
452 """
453 import stat
454
455 # Ignore symlinks/devs/fifos
456 mode = os.lstat(path).st_mode
457 if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
458 return
459
460 tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
461 with open(path, 'rb') as f:
462 file_content = f.read()
463 if tmpdir in file_content:
464 trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
465 oe.qa.add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
466
467
468QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
469def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
470 """
471 Check that all packages containing Xorg drivers have ABI dependencies
472 """
473
474 # Skip dev, dbg or nativesdk packages
475 if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
476 return
477
478 driverdir = d.expand("${libdir}/xorg/modules/drivers/")
479 if driverdir in path and path.endswith(".so"):
480 mlprefix = d.getVar('MLPREFIX') or ''
481 for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + name) or ""):
482 if rdep.startswith("%sxorg-abi-" % mlprefix):
483 return
484 oe.qa.add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
485
486QAPATHTEST[infodir] = "package_qa_check_infodir"
487def package_qa_check_infodir(path, name, d, elf, messages):
488 """
489 Check that /usr/share/info/dir isn't shipped in a particular package
490 """
491 infodir = d.expand("${infodir}/dir")
492
493 if infodir in path:
494 oe.qa.add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
495
496QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
497def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
498 """
499 Check that the package doesn't contain any absolute symlinks to the sysroot.
500 """
501 if os.path.islink(path):
502 target = os.readlink(path)
503 if os.path.isabs(target):
504 tmpdir = d.getVar('TMPDIR')
505 if target.startswith(tmpdir):
506 trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
507 oe.qa.add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
508
509# Check license variables
510do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
511python populate_lic_qa_checksum() {
512 """
513 Check for changes in the license files.
514 """
515
516 lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
517 lic = d.getVar('LICENSE')
518 pn = d.getVar('PN')
519
520 if lic == "CLOSED":
521 return
522
523 if not lic_files and d.getVar('SRC_URI'):
524 oe.qa.handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
525
526 srcdir = d.getVar('S')
527 corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
528 for url in lic_files.split():
529 try:
530 (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
531 except bb.fetch.MalformedUrl:
532 oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
533 continue
534 srclicfile = os.path.join(srcdir, path)
535 if not os.path.isfile(srclicfile):
536 oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
537 continue
538
539 if (srclicfile == corebase_licensefile):
540 bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for a MIT License file in LIC_FILES_CHKSUM. This will become an error in the future")
541
542 recipemd5 = parm.get('md5', '')
543 beginline, endline = 0, 0
544 if 'beginline' in parm:
545 beginline = int(parm['beginline'])
546 if 'endline' in parm:
547 endline = int(parm['endline'])
548
549 if (not beginline) and (not endline):
550 md5chksum = bb.utils.md5_file(srclicfile)
551 with open(srclicfile, 'r', errors='replace') as f:
552 license = f.read().splitlines()
553 else:
554 with open(srclicfile, 'rb') as f:
555 import hashlib
556 lineno = 0
557 license = []
558 m = hashlib.new('MD5', usedforsecurity=False)
559 for line in f:
560 lineno += 1
561 if (lineno >= beginline):
562 if ((lineno <= endline) or not endline):
563 m.update(line)
564 license.append(line.decode('utf-8', errors='replace').rstrip())
565 else:
566 break
567 md5chksum = m.hexdigest()
568 if recipemd5 == md5chksum:
569 bb.note (pn + ": md5 checksum matched for ", url)
570 else:
571 if recipemd5:
572 msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
573 msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
574 max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
575 if not license or license[-1] != '':
576 # Ensure that our license text ends with a line break
577 # (will be added with join() below).
578 license.append('')
579 remove = len(license) - max_lines
580 if remove > 0:
581 start = max_lines // 2
582 end = start + remove - 1
583 del license[start:end]
584 license.insert(start, '...')
585 msg = msg + "\n" + pn + ": Here is the selected license text:" + \
586 "\n" + \
587 "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
588 "\n" + "\n".join(license) + \
589 "{:^^70}".format(" endline=%d " % endline if endline else "")
590 if beginline:
591 if endline:
592 srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
593 else:
594 srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
595 elif endline:
596 srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
597 else:
598 srcfiledesc = srclicfile
599 msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
600
601 else:
602 msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
603 msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
604 oe.qa.handle_error("license-checksum", msg, d)
605
606 oe.qa.exit_if_errors(d)
607}
608
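# For reference, the LIC_FILES_CHKSUM entries checked above take forms like the
# following (the recipe paths and md5 value are illustrative):
#
#   LIC_FILES_CHKSUM = "file://COPYING;md5=0835ade698e0bcf8506ecda2f7b4f302"
#   LIC_FILES_CHKSUM = "file://src/main.c;beginline=3;endline=21;md5=0835ade698e0bcf8506ecda2f7b4f302"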
609def qa_check_staged(path, d):
610 """
611 Check staged la and pc files for common problems like references to the work
612 directory.
613
614 As this is run after every stage we should be able to find the one
615 responsible for the errors easily even if we look at every .pc and .la file.
616 """
617
618 tmpdir = d.getVar('TMPDIR')
619 workdir = os.path.join(tmpdir, "work")
620 recipesysroot = d.getVar("RECIPE_SYSROOT")
621
622 if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
623 pkgconfigcheck = workdir
624 else:
625 pkgconfigcheck = tmpdir
626
627 skip = (d.getVar('INSANE_SKIP') or "").split()
628 skip_la = False
629 if 'la' in skip:
630 bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
631 skip_la = True
632
633 skip_pkgconfig = False
634 if 'pkgconfig' in skip:
635 bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
636 skip_pkgconfig = True
637
638 skip_shebang_size = False
639 if 'shebang-size' in skip:
640 bb.note("Recipe %s skipping qa checkking: shebang-size" % d.getVar('PN'))
641 skip_shebang_size = True
642
643 # find all .la and .pc files
644 # read the content
645 # and check for stuff that looks wrong
646 for root, dirs, files in os.walk(path):
647 for file in files:
648 path = os.path.join(root,file)
649 if file.endswith(".la") and not skip_la:
650 with open(path) as f:
651 file_content = f.read()
652 file_content = file_content.replace(recipesysroot, "")
653 if workdir in file_content:
654 error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
655 oe.qa.handle_error("la", error_msg, d)
656 elif file.endswith(".pc") and not skip_pkgconfig:
657 with open(path) as f:
658 file_content = f.read()
659 file_content = file_content.replace(recipesysroot, "")
660 if pkgconfigcheck in file_content:
661 error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
662 oe.qa.handle_error("pkgconfig", error_msg, d)
663
664 if not skip_shebang_size:
665 errors = {}
666 package_qa_check_shebang_size(path, "", d, None, errors)
667 for e in errors:
668 oe.qa.handle_error(e, errors[e], d)
669
670
671# Run all package-wide warnfuncs and errorfuncs
672def package_qa_package(warnfuncs, errorfuncs, package, d):
673 warnings = {}
674 errors = {}
675
676 for func in warnfuncs:
677 func(package, d, warnings)
678 for func in errorfuncs:
679 func(package, d, errors)
680
681 for w in warnings:
682 oe.qa.handle_error(w, warnings[w], d)
683 for e in errors:
684 oe.qa.handle_error(e, errors[e], d)
685
686 return len(errors) == 0
687
688# Run all recipe-wide warnfuncs and errorfuncs
689def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
690 warnings = {}
691 errors = {}
692
693 for func in warnfuncs:
694 func(pn, d, warnings)
695 for func in errorfuncs:
696 func(pn, d, errors)
697
698 for w in warnings:
699 oe.qa.handle_error(w, warnings[w], d)
700 for e in errors:
701 oe.qa.handle_error(e, errors[e], d)
702
703 return len(errors) == 0
704
705def prepopulate_objdump_p(elf, d):
706 output = elf.run_objdump("-p", d)
707 return (elf.name, output)
708
709# Walk over all files in a directory and call the warn/error check functions
710def package_qa_walk(warnfuncs, errorfuncs, package, d):
711 # If this throws an exception, then fix the dict in oe.elf.machine_dict
712 target_os = d.getVar('HOST_OS')
713 target_arch = d.getVar('HOST_ARCH')
714
715 warnings = {}
716 errors = {}
717 elves = {}
718 for path in pkgfiles[package]:
719 elf = None
720 if os.path.isfile(path):
721 elf = oe.qa.ELFFile(path)
722 try:
723 elf.open()
724 elf.close()
725 except oe.qa.NotELFFileError:
726 elf = None
727 if elf:
728 elves[path] = elf
729
730 results = oe.utils.multiprocess_launch(prepopulate_objdump_p, elves.values(), d, extraargs=(d,))
731 for item in results:
732 elves[item[0]].set_objdump("-p", item[1])
733
734 for path in pkgfiles[package]:
735 if path in elves:
736 elves[path].open()
737 for func in warnfuncs:
738 func(path, package, d, elves.get(path), warnings)
739 for func in errorfuncs:
740 func(path, package, d, elves.get(path), errors)
741 if path in elves:
742 elves[path].close()
743
744 for w in warnings:
745 oe.qa.handle_error(w, warnings[w], d)
746 for e in errors:
747 oe.qa.handle_error(e, errors[e], d)
748
749def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
750 # Don't do this check for kernel/module recipes, there aren't too many debug/development
751 # packages and you can get false positives e.g. on kernel-module-lirc-dev
752 if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
753 return
754
755 if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
756 localdata = bb.data.createCopy(d)
757 localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
758
759 # Now check the RDEPENDS
760 rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
761
762 # Now do the sanity check!!!
763 if "build-deps" not in skip:
764 for rdepend in rdepends:
765 if "-dbg" in rdepend and "debug-deps" not in skip:
766 error_msg = "%s rdepends on %s" % (pkg,rdepend)
767 oe.qa.handle_error("debug-deps", error_msg, d)
768 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
769 error_msg = "%s rdepends on %s" % (pkg, rdepend)
770 oe.qa.handle_error("dev-deps", error_msg, d)
771 if rdepend not in packages:
772 rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
773 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
774 continue
775 if not rdep_data or not 'PN' in rdep_data:
776 pkgdata_dir = d.getVar("PKGDATA_DIR")
777 try:
778 possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
779 except OSError:
780 possibles = []
781 for p in possibles:
782 rdep_data = oe.packagedata.read_subpkgdata(p, d)
783 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
784 break
785 if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
786 continue
787 if rdep_data and 'PN' in rdep_data:
788 error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
789 else:
790 error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
791 oe.qa.handle_error("build-deps", error_msg, d)
792
793 if "file-rdeps" not in skip:
794 ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
795 if bb.data.inherits_class('nativesdk', d):
796 ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
797 # For saving the FILERDEPENDS
798 filerdepends = {}
799 rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
800 for key in rdep_data:
801 if key.startswith("FILERDEPENDS:"):
802 for subkey in bb.utils.explode_deps(rdep_data[key]):
803 if subkey not in ignored_file_rdeps and \
804 not subkey.startswith('perl('):
805 # We already know it starts with FILERDEPENDS:
806 filerdepends[subkey] = key[13:]
807
808 if filerdepends:
809 done = rdepends[:]
810 # Add the rprovides of itself
811 if pkg not in done:
812 done.insert(0, pkg)
813
814 # python is not a package itself, but python-core provides it, so
815 # skip checking /usr/bin/python if python is in the rdeps, in
816 # case there is a RDEPENDS:pkg = "python" in the recipe.
817 for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
818 if py in done:
819 filerdepends.pop("/usr/bin/python",None)
820 done.remove(py)
821 for rdep in done:
822 # The file dependencies may contain package names, e.g.,
823 # perl
824 filerdepends.pop(rdep,None)
825
826 # For saving the FILERPROVIDES, RPROVIDES and FILES_INFO
827 rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
828 for key in rdep_data:
829 if key.startswith("FILERPROVIDES:") or key.startswith("RPROVIDES:"):
830 for subkey in bb.utils.explode_deps(rdep_data[key]):
831 filerdepends.pop(subkey,None)
832 # Add the files list to the rprovides
833 if key.startswith("FILES_INFO:"):
834 # Use eval() to turn it into a dict
835 for subkey in eval(rdep_data[key]):
836 filerdepends.pop(subkey,None)
837 if not filerdepends:
838 # Break if all the file rdepends are met
839 break
840 if filerdepends:
841 for key in filerdepends:
842 error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS:%s?" % \
843 (filerdepends[key].replace(":%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
844 oe.qa.handle_error("file-rdeps", error_msg, d)
845package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
846
847def package_qa_check_deps(pkg, pkgdest, d):
848
849 localdata = bb.data.createCopy(d)
850 localdata.setVar('OVERRIDES', pkg)
851
852 def check_valid_deps(var):
853 try:
854 rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
855 except ValueError as e:
856 bb.fatal("%s:%s: %s" % (var, pkg, e))
857 for dep in rvar:
858 for v in rvar[dep]:
859 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
860 error_msg = "%s:%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
861 oe.qa.handle_error("dep-cmp", error_msg, d)
862
863 check_valid_deps('RDEPENDS')
864 check_valid_deps('RRECOMMENDS')
865 check_valid_deps('RSUGGESTS')
866 check_valid_deps('RPROVIDES')
867 check_valid_deps('RREPLACES')
868 check_valid_deps('RCONFLICTS')
869
870QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
871def package_qa_check_usrmerge(pkg, d, messages):
872
873 pkgdest = d.getVar('PKGDEST')
874 pkg_dir = pkgdest + os.sep + pkg + os.sep
875 merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
876 for f in merged_dirs:
877 if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
878 msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
879 oe.qa.add_message(messages, "usrmerge", msg)
880 return False
881 return True
882
883QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
884def package_qa_check_perllocalpod(pkg, d, messages):
885 """
886 Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
887 installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
888 handle this for most recipes.
889 """
890 import glob
891 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
892 podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
893
894 matches = glob.glob(podpath)
895 if matches:
896 matches = [package_qa_clean_path(path, d, pkg) for path in matches]
897 msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
898 oe.qa.add_message(messages, "perllocalpod", msg)
899
900QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
901def package_qa_check_expanded_d(package, d, messages):
902 """
903 Check for the expanded D (${D}) value in pkg_* and FILES
904 variables and warn the user to use it correctly.
905 """
906 sane = True
907 expanded_d = d.getVar('D')
908
909 for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
910 bbvar = d.getVar(var + ":" + package) or ""
911 if expanded_d in bbvar:
912 if var == 'FILES':
913 oe.qa.add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
914 sane = False
915 else:
916 oe.qa.add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
917 sane = False
918 return sane
919
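# A sketch of what the expanded-d check catches (hypothetical recipe
# fragment): ${D} in pkg_postinst expands the build directory path into the
# packaged script, whereas $D is evaluated at install time and is correct:
#
#   pkg_postinst:${PN}() {
#       ldconfig -r ${D}   # flagged: build directory path leaks into the package
#   }
#   pkg_postinst:${PN}() {
#       ldconfig -r $D     # correct: resolved against the rootfs at install time
#   }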
920QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
921def package_qa_check_unlisted_pkg_lics(package, d, messages):
922 """
923 Check that all licenses for a package are among the licenses for the recipe.
924 """
925 pkg_lics = d.getVar('LICENSE:' + package)
926 if not pkg_lics:
927 return True
928
929 recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
930 package_lics = oe.license.list_licenses(pkg_lics)
931 unlisted = package_lics - recipe_lics_set
932 if unlisted:
933 oe.qa.add_message(messages, "unlisted-pkg-lics",
934 "LICENSE:%s includes licenses (%s) that are not "
935 "listed in LICENSE" % (package, ' '.join(unlisted)))
936 return False
937 obsolete = set(oe.license.obsolete_license_list()) & package_lics - recipe_lics_set
938 if obsolete:
939 oe.qa.add_message(messages, "obsolete-license",
940 "LICENSE:%s includes obsolete licenses %s" % (package, ' '.join(obsolete)))
941 return False
942 return True
943
944QAPKGTEST[empty-dirs] = "package_qa_check_empty_dirs"
945def package_qa_check_empty_dirs(pkg, d, messages):
946 """
947 Check for the existence of files in directories that are expected to be
948 empty.
949 """
950
951 pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
952 for dir in (d.getVar('QA_EMPTY_DIRS') or "").split():
953 empty_dir = oe.path.join(pkgd, dir)
954 if os.path.exists(empty_dir) and os.listdir(empty_dir):
955 recommendation = (d.getVar('QA_EMPTY_DIRS_RECOMMENDATION:' + dir) or
956 "but it is expected to be empty")
957 msg = "%s installs files in %s, %s" % (pkg, dir, recommendation)
958 oe.qa.add_message(messages, "empty-dirs", msg)
959
960def package_qa_check_encoding(keys, encode, d):
961 def check_encoding(key, enc):
962 sane = True
963 value = d.getVar(key)
964 if value:
965 try:
966 s = value.encode(enc)
967 except UnicodeDecodeError as e:
968 error_msg = "%s has non %s characters" % (key,enc)
969 sane = False
970 oe.qa.handle_error("invalid-chars", error_msg, d)
971 return sane
972
973 for key in keys:
974 sane = check_encoding(key, encode)
975 if not sane:
976 break
977
978HOST_USER_UID := "${@os.getuid()}"
979HOST_USER_GID := "${@os.getgid()}"
980
981QAPATHTEST[host-user-contaminated] = "package_qa_check_host_user"
982def package_qa_check_host_user(path, name, d, elf, messages):
983 """Check for paths outside of /home which are owned by the user running bitbake."""
984
985 if not os.path.lexists(path):
986 return
987
988 dest = d.getVar('PKGDEST')
989 pn = d.getVar('PN')
990 home = os.path.join(dest, name, 'home')
991 if path == home or path.startswith(home + os.sep):
992 return
993
994 try:
995 stat = os.lstat(path)
996 except OSError as exc:
997 import errno
998 if exc.errno != errno.ENOENT:
999 raise
1000 else:
1001 check_uid = int(d.getVar('HOST_USER_UID'))
1002 if stat.st_uid == check_uid:
1003 oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
1004 return False
1005
1006 check_gid = int(d.getVar('HOST_USER_GID'))
1007 if stat.st_gid == check_gid:
1008 oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
1009 return False
1010 return True
1011
1012QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
1013def package_qa_check_unhandled_features_check(pn, d, messages):
1014 if not bb.data.inherits_class('features_check', d):
1015 var_set = False
1016 for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
1017 for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
1018 if d.getVar(var) is not None or d.hasOverrides(var):
1019 var_set = True
1020 if var_set:
1021 oe.qa.handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
1022
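# What the check above triggers on, sketched (hypothetical recipe): setting a
# features_check variable without inheriting the class that acts on it:
#
#   REQUIRED_DISTRO_FEATURES = "x11"
#   # missing: inherit features_check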
1023QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
1024def package_qa_check_missing_update_alternatives(pn, d, messages):
1025 # Look at all packages and find out if any of them sets the ALTERNATIVE variable
1026 # without inheriting the update-alternatives class
1027 for pkg in (d.getVar('PACKAGES') or '').split():
1028 if d.getVar('ALTERNATIVE:%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
1029 oe.qa.handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE:%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
1030
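# Sketch of the pattern this catches (hypothetical recipe): defining an
# alternative without the class that registers it at rootfs time:
#
#   ALTERNATIVE:${PN} = "vi"
#   # missing: inherit update-alternatives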
1031# The PACKAGE FUNC to scan each package
1032python do_package_qa () {
1033 import subprocess
1034 import oe.packagedata
1035
1036 bb.note("DO PACKAGE QA")
1037
1038 main_lic = d.getVar('LICENSE')
1039
1040 # Check for obsolete license references in main LICENSE (packages are checked below for any changes)
1041 main_licenses = oe.license.list_licenses(d.getVar('LICENSE'))
1042 obsolete = set(oe.license.obsolete_license_list()) & main_licenses
1043 if obsolete:
1044 oe.qa.handle_error("obsolete-license", "Recipe LICENSE includes obsolete licenses %s" % ' '.join(obsolete), d)
1045
1046 bb.build.exec_func("read_subpackage_metadata", d)
1047
1048 # Check non UTF-8 characters on recipe's metadata
1049 package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
1050
1051 logdir = d.getVar('T')
1052 pn = d.getVar('PN')
1053
1054 # Scan the packages...
1055 pkgdest = d.getVar('PKGDEST')
1056 packages = set((d.getVar('PACKAGES') or '').split())
1057
1058 global pkgfiles
1059 pkgfiles = {}
1060 for pkg in packages:
1061 pkgfiles[pkg] = []
1062 pkgdir = os.path.join(pkgdest, pkg)
1063 for walkroot, dirs, files in os.walk(pkgdir):
1064 # Don't walk into top-level CONTROL or DEBIAN directories as these
1065 # are temporary directories created by do_package.
1066 if walkroot == pkgdir:
1067 for control in ("CONTROL", "DEBIAN"):
1068 if control in dirs:
1069 dirs.remove(control)
1070 for file in files:
1071 pkgfiles[pkg].append(os.path.join(walkroot, file))
1072
1073 # if there are no packages, there is nothing to scan
1074 if not packages:
1075 return
1076
1077 import re
1078 # The package name must match the [a-z0-9.+-]+ regular expression
1079 pkgname_pattern = re.compile(r"^[a-z0-9.+-]+$")
1080
1081 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
1082 taskdeps = set()
1083 for dep in taskdepdata:
1084 taskdeps.add(taskdepdata[dep][0])
1085
1086 def parse_test_matrix(matrix_name):
1087 testmatrix = d.getVarFlags(matrix_name) or {}
1088 g = globals()
1089 warnchecks = []
1090 for w in (d.getVar("WARN_QA") or "").split():
1091 if w in skip:
1092 continue
1093 if w in testmatrix and testmatrix[w] in g:
1094 warnchecks.append(g[testmatrix[w]])
1095
1096 errorchecks = []
1097 for e in (d.getVar("ERROR_QA") or "").split():
1098 if e in skip:
1099 continue
1100 if e in testmatrix and testmatrix[e] in g:
1101 errorchecks.append(g[testmatrix[e]])
1102 return warnchecks, errorchecks
1103
1104 for package in packages:
1105 skip = set((d.getVar('INSANE_SKIP') or "").split() +
1106 (d.getVar('INSANE_SKIP:' + package) or "").split())
1107 if skip:
1108 bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
1109
1110 bb.note("Checking Package: %s" % package)
1111 # Check package name
1112 if not pkgname_pattern.match(package):
1113 oe.qa.handle_error("pkgname",
1114 "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
1115
1116 warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
1117 package_qa_walk(warn_checks, error_checks, package, d)
1118
1119 warn_checks, error_checks = parse_test_matrix("QAPKGTEST")
1120 package_qa_package(warn_checks, error_checks, package, d)
1121
1122 package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
1123 package_qa_check_deps(package, pkgdest, d)
1124
1125 warn_checks, error_checks = parse_test_matrix("QARECIPETEST")
1126 package_qa_recipe(warn_checks, error_checks, pn, d)
1127
1128 if 'libdir' in d.getVar("ALL_QA").split():
1129 package_qa_check_libdir(d)
1130
1131 oe.qa.exit_if_errors(d)
1132}
1133
1134# binutils is used for most checks, so it needs to be set as a dependency.
1135# POPULATESYSROOTDEPS is defined in staging class.
1136do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
1137do_package_qa[vardeps] = "${@bb.utils.contains('ERROR_QA', 'empty-dirs', 'QA_EMPTY_DIRS', '', d)}"
1138do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
1139do_package_qa[rdeptask] = "do_packagedata"
1140addtask do_package_qa after do_packagedata do_package before do_build
1141
1142# Add the package specific INSANE_SKIPs to the sstate dependencies
1143python() {
1144 pkgs = (d.getVar('PACKAGES') or '').split()
1145 for pkg in pkgs:
1146 d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP:{}".format(pkg))
1147}
1148
1149SSTATETASKS += "do_package_qa"
1150do_package_qa[sstate-inputdirs] = ""
1151do_package_qa[sstate-outputdirs] = ""
1152python do_package_qa_setscene () {
1153 sstate_setscene(d)
1154}
1155addtask do_package_qa_setscene
1156
1157python do_qa_sysroot() {
1158 bb.note("QA checking do_populate_sysroot")
1159 sysroot_destdir = d.expand('${SYSROOT_DESTDIR}')
1160 for sysroot_dir in d.expand('${SYSROOT_DIRS}').split():
1161 qa_check_staged(sysroot_destdir + sysroot_dir, d)
1162 oe.qa.exit_with_message_if_errors("do_populate_sysroot for this recipe installed files with QA issues", d)
1163}
1164do_populate_sysroot[postfuncs] += "do_qa_sysroot"
1165
1166python do_qa_patch() {
1167 import subprocess
1168
1169 ###########################################################################
1170 # Check patch.log for fuzz warnings
1171 #
1172 # Further information on why we check for patch fuzz warnings:
1173 # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
1174 # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
1175 ###########################################################################
1176
1177 logdir = d.getVar('T')
1178 patchlog = os.path.join(logdir,"log.do_patch")
1179
1180 if os.path.exists(patchlog):
1181 fuzzheader = '--- Patch fuzz start ---'
1182 fuzzfooter = '--- Patch fuzz end ---'
1183 statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
1184 if subprocess.call(statement, shell=True) == 0:
1185 msg = "Fuzz detected:\n\n"
1186 fuzzmsg = ""
1187 inFuzzInfo = False
1188 f = open(patchlog, "r")
1189 for line in f:
1190 if fuzzheader in line:
1191 inFuzzInfo = True
1192 fuzzmsg = ""
1193 elif fuzzfooter in line:
1194 fuzzmsg = fuzzmsg.replace('\n\n', '\n')
1195 msg += fuzzmsg
1196 msg += "\n"
1197 inFuzzInfo = False
1198 elif inFuzzInfo and not 'Now at patch' in line:
1199 fuzzmsg += line
1200 f.close()
1201 msg += "The context lines in the patches can be updated with devtool:\n"
1202 msg += "\n"
1203 msg += " devtool modify %s\n" % d.getVar('PN')
1204 msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
1205 msg += "Don't forget to review changes done by devtool!\n"
1206 if bb.utils.filter('ERROR_QA', 'patch-fuzz', d):
1207 bb.error(msg)
1208 elif bb.utils.filter('WARN_QA', 'patch-fuzz', d):
1209 bb.warn(msg)
1210 msg = "Patch log indicates that patches do not apply cleanly."
1211 oe.qa.handle_error("patch-fuzz", msg, d)
1212
1213 # Check if the patch contains a correctly formatted and spelled Upstream-Status
1214 import re
1215 from oe import patch
1216
1217 for url in patch.src_patches(d):
1218 (_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
1219
1220 # skip patches not in oe-core
1221 if '/meta/' not in fullpath:
1222 continue
1223
1224 kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
1225 strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Accepted|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
1226 guidelines = "https://www.openembedded.org/wiki/Commit_Patch_Message_Guidelines#Patch_Header_Recommendations:_Upstream-Status"
1227
1228 with open(fullpath, encoding='utf-8', errors='ignore') as f:
1229 file_content = f.read()
1230 match_kinda = kinda_status_re.search(file_content)
1231 match_strict = strict_status_re.search(file_content)
1232
1233 if not match_strict:
1234 if match_kinda:
1235 bb.error("Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0)))
1236 else:
1237 bb.error("Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines))
1238}
1239
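# A patch header accepted by the strict regex above looks like this (the URL
# is illustrative):
#
#   Upstream-Status: Submitted [https://lists.example.org/archives/2022/patch.html]
#
# whereas e.g. "Upstream Status: pending" only matches the fuzzy regex and is
# reported as malformed.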
1240python do_qa_configure() {
1241 import subprocess
1242
1243 ###########################################################################
1244 # Check config.log for cross compile issues
1245 ###########################################################################
1246
1247 configs = []
1248 workdir = d.getVar('WORKDIR')
1249
1250 skip = (d.getVar('INSANE_SKIP') or "").split()
1251 skip_configure_unsafe = False
1252 if 'configure-unsafe' in skip:
1253 bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
1254 skip_configure_unsafe = True
1255
1256 if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
1257 bb.note("Checking autotools environment for common misconfiguration")
1258 for root, dirs, files in os.walk(workdir):
1259 statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \
1260 os.path.join(root,"config.log")
1261 if "config.log" in files:
1262 if subprocess.call(statement, shell=True) == 0:
1263 error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
1264Rerun configure task after fixing this."""
1265 oe.qa.handle_error("configure-unsafe", error_msg, d)
1266
1267 if "configure.ac" in files:
1268 configs.append(os.path.join(root,"configure.ac"))
1269 if "configure.in" in files:
1270 configs.append(os.path.join(root, "configure.in"))
1271
1272 ###########################################################################
1273 # Check gettext configuration and dependencies are correct
1274 ###########################################################################
1275
1276 skip_configure_gettext = False
1277 if 'configure-gettext' in skip:
1278 bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
1279 skip_configure_gettext = True
1280
1281 cnf = d.getVar('EXTRA_OECONF') or ""
1282 if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
1283 "--disable-nls" in cnf or skip_configure_gettext):
1284 ml = d.getVar("MLPREFIX") or ""
1285 if bb.data.inherits_class('cross-canadian', d):
1286 gt = "nativesdk-gettext"
1287 else:
1288 gt = "gettext-native"
1289 deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
1290 if gt not in deps:
1291 for config in configs:
1292 gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
1293 if subprocess.call(gnu, shell=True) == 0:
1294 error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
1295 oe.qa.handle_error("configure-gettext", error_msg, d)
1296
1297 ###########################################################################
1298 # Check for unrecognised configure options (with an ignore list)
1299 ###########################################################################
1300 if bb.data.inherits_class("autotools", d):
1301 bb.note("Checking configure output for unrecognised options")
1302 try:
1303 if bb.data.inherits_class("autotools", d):
1304 flag = "WARNING: unrecognized options:"
1305 log = os.path.join(d.getVar('B'), 'config.log')
1306 output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
1307 options = set()
1308 for line in output.splitlines():
1309 options |= set(line.partition(flag)[2].split())
1310 ignore_opts = set(d.getVar("UNKNOWN_CONFIGURE_OPT_IGNORE").split())
1311 options -= ignore_opts
1312 if options:
1313 pn = d.getVar('PN')
1314 error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
1315 oe.qa.handle_error("unknown-configure-option", error_msg, d)
1316 except subprocess.CalledProcessError:
1317 pass
1318
1319 # Check invalid PACKAGECONFIG
1320 pkgconfig = (d.getVar("PACKAGECONFIG") or "").split()
1321 if pkgconfig:
1322 pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
1323 for pconfig in pkgconfig:
1324 if pconfig not in pkgconfigflags:
1325 pn = d.getVar('PN')
1326 error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
1327 oe.qa.handle_error("invalid-packageconfig", error_msg, d)
1328
1329 oe.qa.exit_if_errors(d)
1330}
1331
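# For reference, the PACKAGECONFIG flags validated above are declared per
# feature (the feature name and arguments here are hypothetical):
#
#   PACKAGECONFIG ??= "foo"
#   PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,libfoo"
#
# Any name listed in PACKAGECONFIG without a matching [flag] definition is
# reported as invalid-packageconfig.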
1332def unpack_check_src_uri(pn, d):
1333 import re
1334
1335 skip = (d.getVar('INSANE_SKIP') or "").split()
1336 if 'src-uri-bad' in skip:
1337 bb.note("Recipe %s skipping qa checking: src-uri-bad" % d.getVar('PN'))
1338 return
1339
1340 if "${PN}" in d.getVar("SRC_URI", False):
1341 oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
1342
1343 for url in d.getVar("SRC_URI").split():
1344 # Search for github and gitlab URLs that pull unstable archives (comment for future greppers)
1345 if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
1346 oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
1347
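# Sketch of what the archive check above flags, versus a stable alternative
# (the project URL is hypothetical):
#
#   SRC_URI = "https://github.com/example/proj/archive/v1.0.tar.gz"           # flagged: unstable archive
#   SRC_URI = "git://github.com/example/proj.git;protocol=https;branch=main"  # preferred: git protocol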
1348python do_qa_unpack() {
1349 src_uri = d.getVar('SRC_URI')
1350 s_dir = d.getVar('S')
1351 if src_uri and not os.path.exists(s_dir):
1352 bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
1353
1354 unpack_check_src_uri(d.getVar('PN'), d)
1355}
1356
1357# Check for patch fuzz
1358do_patch[postfuncs] += "do_qa_patch "
1359
1360# Check for broken config.log files and for packages requiring Gettext which
1361# don't have it in DEPENDS.
1362#addtask qa_configure after do_configure before do_compile
1363do_configure[postfuncs] += "do_qa_configure "
1364
1365# Check whether S exists.
1366do_unpack[postfuncs] += "do_qa_unpack"
1367
1368python () {
1369 import re
1370
1371 tests = d.getVar('ALL_QA').split()
1372 if "desktop" in tests:
1373 d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
1374
1375 ###########################################################################
1376 # Check various variables
1377 ###########################################################################
1378
1379 # Checking ${FILESEXTRAPATHS}
1380 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
1381 if '__default' not in extrapaths.split(":"):
1382 msg = "FILESEXTRAPATHS-variable, must always use :prepend (or :append)\n"
1383 msg += "type of assignment, and don't forget the colon.\n"
1384 msg += "Please assign it with the format of:\n"
1385 msg += " FILESEXTRAPATHS:append := \":${THISDIR}/Your_Files_Path\" or\n"
1386 msg += " FILESEXTRAPATHS:prepend := \"${THISDIR}/Your_Files_Path:\"\n"
1387 msg += "in your bbappend file\n\n"
1388 msg += "Your incorrect assignment is:\n"
1389 msg += "%s\n" % extrapaths
1390 bb.warn(msg)
1391
1392 overrides = d.getVar('OVERRIDES').split(':')
1393 pn = d.getVar('PN')
1394 if pn in overrides:
1395 msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
1396 oe.qa.handle_error("pn-overrides", msg, d)
1397 prog = re.compile(r'[A-Z]')
1398 if prog.search(pn):
1399 oe.qa.handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
1400
1401 # Some people mistakenly use DEPENDS:${PN} instead of DEPENDS and wonder
1402 # why it doesn't work.
1403 if (d.getVar(d.expand('DEPENDS:${PN}'))):
1404 oe.qa.handle_error("pkgvarcheck", "recipe uses DEPENDS:${PN}, should use DEPENDS", d)
1405
1406 issues = []
1407 if (d.getVar('PACKAGES') or "").split():
1408 for dep in (d.getVar('QADEPENDS') or "").split():
1409 d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
1410 for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
1411 if d.getVar(var, False):
1412 issues.append(var)
1413
1414 fakeroot_tests = d.getVar('FAKEROOT_QA').split()
1415 if set(tests) & set(fakeroot_tests):
1416 d.setVarFlag('do_package_qa', 'fakeroot', '1')
1417 d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
1418 else:
1419 d.setVarFlag('do_package_qa', 'rdeptask', '')
1420 for i in issues:
1421 oe.qa.handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
1422
1423 if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
1424 for native_class in ['native', 'nativesdk']:
1425 if bb.data.inherits_class(native_class, d):
1426
1427 inherited_classes = d.getVar('__inherit_cache', False) or []
1428 needle = "/" + native_class
1429
1430 bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
1431 # BBCLASSEXTEND items are always added at the end
1432 skip_classes = bbclassextend
1433 if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
1434 # native also inherits nopackages and relocatable bbclasses
1435 skip_classes.extend(['nopackages', 'relocatable'])
1436
1437 broken_order = []
1438 for class_item in reversed(inherited_classes):
1439 if needle not in class_item:
1440 for extend_item in skip_classes:
1441 if '/%s.bbclass' % extend_item in class_item:
1442 break
1443 else:
1444 pn = d.getVar('PN')
1445 broken_order.append(os.path.basename(class_item))
1446 else:
1447 break
1448 if broken_order:
1449 oe.qa.handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
1450 "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
1451
1452 oe.qa.exit_if_errors(d)
1453}
diff --git a/meta/classes-global/license.bbclass b/meta/classes-global/license.bbclass
new file mode 100644
index 0000000000..560acb8b6f
--- /dev/null
+++ b/meta/classes-global/license.bbclass
@@ -0,0 +1,426 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
8# LIC_FILES_CHKSUM.
9# TODO:
10# - There is a real issue revolving around license naming standards.
11
12LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
13LICSSTATEDIR = "${WORKDIR}/license-destdir/"
14
15# Create extra package with license texts and add it to RRECOMMENDS:${PN}
16LICENSE_CREATE_PACKAGE[type] = "boolean"
17LICENSE_CREATE_PACKAGE ??= "0"
18LICENSE_PACKAGE_SUFFIX ??= "-lic"
19LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
20
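# As a usage sketch: a distro or local configuration enables the extra
# ${PN}-lic package by setting the boolean above, e.g.:
#
#   LICENSE_CREATE_PACKAGE = "1"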
21addtask populate_lic after do_patch before do_build
22do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
23do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
24
25python do_populate_lic() {
26 """
27 Populate LICENSE_DIRECTORY with licenses.
28 """
29 lic_files_paths = find_license_files(d)
30
31 # The base directory we wrangle licenses to
32 destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('PN'))
33 copy_license_files(lic_files_paths, destdir)
34 info = get_recipe_info(d)
35 with open(os.path.join(destdir, "recipeinfo"), "w") as f:
36 for key in sorted(info.keys()):
37 f.write("%s: %s\n" % (key, info[key]))
38 oe.qa.exit_if_errors(d)
39}
40
41PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
42# It would be better to copy them in do_install:append, but find_license_files is python
43python perform_packagecopy:prepend () {
44 enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
45 if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
46 lic_files_paths = find_license_files(d)
47
48 # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
49 destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
50 copy_license_files(lic_files_paths, destdir)
51 add_package_and_files(d)
52}
53perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
54
55def get_recipe_info(d):
56 info = {}
57 info["PV"] = d.getVar("PV")
58 info["PR"] = d.getVar("PR")
59 info["LICENSE"] = d.getVar("LICENSE")
60 return info
61
62def add_package_and_files(d):
63 packages = d.getVar('PACKAGES')
64 files = d.getVar('LICENSE_FILES_DIRECTORY')
65 pn = d.getVar('PN')
66 pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
67 if pn_lic in packages.split():
68 bb.warn("%s package already existed in %s." % (pn_lic, pn))
69 else:
70 # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
71 d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
72 d.setVar('FILES:' + pn_lic, files)
73
74def copy_license_files(lic_files_paths, destdir):
75 import shutil
76 import errno
77
78 bb.utils.mkdirhier(destdir)
79 for (basename, path, beginline, endline) in lic_files_paths:
80 try:
81 src = path
82 dst = os.path.join(destdir, basename)
83 if os.path.exists(dst):
84 os.remove(dst)
85 if os.path.islink(src):
86 src = os.path.realpath(src)
87 canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
88 if canlink:
89 try:
90 os.link(src, dst)
91 except OSError as err:
92 if err.errno == errno.EXDEV:
93 # Copy license files if hardlink is not possible even if st_dev is the
94 # same on source and destination (docker container with device-mapper?)
95 canlink = False
96 else:
97 raise
98 # Only chown if we did hardlink and we're running under pseudo
99 if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
100 os.chown(dst,0,0)
101 if not canlink:
102 begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
103 end_idx = max(0, int(endline)) if endline is not None else None
104 if begin_idx is None and end_idx is None:
105 shutil.copyfile(src, dst)
106 else:
107 with open(src, 'rb') as src_f:
108 with open(dst, 'wb') as dst_f:
109 dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
110
111 except Exception as e:
112 bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
113
114def find_license_files(d):
115 """
116 Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
117 """
118 import shutil
119 import oe.license
120 from collections import defaultdict, OrderedDict
121
122 # All the license files for the package
123 lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
124 pn = d.getVar('PN')
125    # The license files are located under S, at the paths given in LIC_FILES_CHKSUM.
126 srcdir = d.getVar('S')
127 # Directory we store the generic licenses as set in the distro configuration
128 generic_directory = d.getVar('COMMON_LICENSE_DIR')
129 # List of basename, path tuples
130 lic_files_paths = []
131    # dict for keeping track of non-generic license mappings
132 non_generic_lics = {}
133 # Entries from LIC_FILES_CHKSUM
134 lic_chksums = {}
135 license_source_dirs = []
136 license_source_dirs.append(generic_directory)
137 try:
138 additional_lic_dirs = d.getVar('LICENSE_PATH').split()
139 for lic_dir in additional_lic_dirs:
140 license_source_dirs.append(lic_dir)
141    except AttributeError:
142 pass
143
144 class FindVisitor(oe.license.LicenseVisitor):
145 def visit_Str(self, node):
146 #
147            # Until we figure out what to do with the two
148            # modifiers we support ("or later" = "+"
149            # and "with exceptions" = "*"),
150            # we'll just strip out the modifier and use
151            # the base license.
152 find_license(node.s.replace("+", "").replace("*", ""))
153 self.generic_visit(node)
154
155 def visit_Constant(self, node):
156 find_license(node.value.replace("+", "").replace("*", ""))
157 self.generic_visit(node)
158
159 def find_license(license_type):
160 try:
161 bb.utils.mkdirhier(gen_lic_dest)
162 except:
163 pass
164 spdx_generic = None
165 license_source = None
166 # If the generic does not exist we need to check to see if there is an SPDX mapping to it,
167 # unless NO_GENERIC_LICENSE is set.
168 for lic_dir in license_source_dirs:
169 if not os.path.isfile(os.path.join(lic_dir, license_type)):
170 if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
171 # Great, there is an SPDXLICENSEMAP. We can copy!
172 bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
173 spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
174 license_source = lic_dir
175 break
176 elif os.path.isfile(os.path.join(lic_dir, license_type)):
177 spdx_generic = license_type
178 license_source = lic_dir
179 break
180
181 non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
182 if spdx_generic and license_source:
183            # We really should copy to generic_ + spdx_generic; however, that ends up messing the manifest
184            # audit up. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes).
185
186 lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
187 None, None))
188
189            # The user may attempt to use NO_GENERIC_LICENSE for a generic license, which doesn't make sense
190            # and should not be allowed; warn the user in this case.
191 if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
192 oe.qa.handle_error("license-no-generic",
193 "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
194
195 elif non_generic_lic and non_generic_lic in lic_chksums:
196 # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
197 # of the package rather than the license_source_dirs.
198 lic_files_paths.append(("generic_" + license_type,
199 os.path.join(srcdir, non_generic_lic), None, None))
200 non_generic_lics[non_generic_lic] = license_type
201 else:
202 # Explicitly avoid the CLOSED license because this isn't generic
203 if license_type != 'CLOSED':
204 # And here is where we warn people that their licenses are lousy
205 oe.qa.handle_error("license-exists",
206 "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
208
209 if not generic_directory:
210 bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
211
212 for url in lic_files.split():
213 try:
214 (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
215 if method != "file" or not path:
216 raise bb.fetch.MalformedUrl()
217 except bb.fetch.MalformedUrl:
218 bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
219 # We want the license filename and path
220 chksum = parm.get('md5', None)
221 beginline = parm.get('beginline')
222 endline = parm.get('endline')
223 lic_chksums[path] = (chksum, beginline, endline)
224
225 v = FindVisitor()
226 try:
227 v.visit_string(d.getVar('LICENSE'))
228 except oe.license.InvalidLicense as exc:
229 bb.fatal('%s: %s' % (d.getVar('PF'), exc))
230 except SyntaxError:
231 oe.qa.handle_error("license-syntax",
232                           "%s: Failed to parse its LICENSE field." % (d.getVar('PF')), d)
233 # Add files from LIC_FILES_CHKSUM to list of license files
234 lic_chksum_paths = defaultdict(OrderedDict)
235 for path, data in sorted(lic_chksums.items()):
236 lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
237 for basename, files in lic_chksum_paths.items():
238 if len(files) == 1:
239 # Don't copy again a LICENSE already handled as non-generic
240 if basename in non_generic_lics:
241 continue
242 data = list(files.values())[0]
243 lic_files_paths.append(tuple([basename] + list(data)))
244 else:
245 # If there are multiple different license files with identical
246 # basenames we rename them to <file>.0, <file>.1, ...
247 for i, data in enumerate(files.values()):
248 lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
249
250 return lic_files_paths
251
252def return_spdx(d, license):
253 """
254 This function returns the spdx mapping of a license if it exists.
255 """
256 return d.getVarFlag('SPDXLICENSEMAP', license)
257
258def canonical_license(d, license):
259 """
260 Return the canonical (SPDX) form of the license if available (so GPLv3
261 becomes GPL-3.0-only) or the passed license if there is no canonical form.
262 """
263 return d.getVarFlag('SPDXLICENSEMAP', license) or license
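# For example, assuming the usual SPDXLICENSEMAP entries from the distro's
# licenses.conf are present:
#   canonical_license(d, "GPLv3")   -> "GPL-3.0-only"
#   canonical_license(d, "SomeLic") -> "SomeLic" (no mapping, returned as-is)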
264
265def expand_wildcard_licenses(d, wildcard_licenses):
266 """
267 There are some common wildcard values users may want to use. Support them
268 here.
269 """
270 licenses = set(wildcard_licenses)
271 mapping = {
272 "AGPL-3.0*" : ["AGPL-3.0-only", "AGPL-3.0-or-later"],
273 "GPL-3.0*" : ["GPL-3.0-only", "GPL-3.0-or-later"],
274 "LGPL-3.0*" : ["LGPL-3.0-only", "LGPL-3.0-or-later"],
275 }
276 for k in mapping:
277 if k in wildcard_licenses:
278 licenses.remove(k)
279 for item in mapping[k]:
280 licenses.add(item)
281
282 for l in licenses:
283 if l in oe.license.obsolete_license_list():
284 bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % l)
285 if "*" in l:
286 bb.fatal("Error, %s is an invalid license wildcard entry" % l)
287
288 return list(licenses)
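# For example, INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*" expands here to
# GPL-3.0-only, GPL-3.0-or-later, LGPL-3.0-only and LGPL-3.0-or-later
# (in no particular order, since a set is used internally).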
289
290def incompatible_license_contains(license, truevalue, falsevalue, d):
291 license = canonical_license(d, license)
292 bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
293 bad_licenses = expand_wildcard_licenses(d, bad_licenses)
294 return truevalue if license in bad_licenses else falsevalue
295
296def incompatible_pkg_license(d, dont_want_licenses, license):
297    # Handles an "or" of two license sets provided by
298    # flattened_licenses(), picking one that works if possible.
299 def choose_lic_set(a, b):
300 return a if all(oe.license.license_ok(canonical_license(d, lic),
301 dont_want_licenses) for lic in a) else b
302
303 try:
304 licenses = oe.license.flattened_licenses(license, choose_lic_set)
305 except oe.license.LicenseError as exc:
306 bb.fatal('%s: %s' % (d.getVar('P'), exc))
307
308 incompatible_lic = []
309 for l in licenses:
310 license = canonical_license(d, l)
311 if not oe.license.license_ok(license, dont_want_licenses):
312 incompatible_lic.append(license)
313
314 return sorted(incompatible_lic)
315
316def incompatible_license(d, dont_want_licenses, package=None):
317 """
318    This function checks if a recipe has only incompatible licenses. It also
319    takes the 'or' operator into consideration. dont_want_licenses should be passed
320 as canonical (SPDX) names.
321 """
322 import oe.license
323 license = d.getVar("LICENSE:%s" % package) if package else None
324 if not license:
325 license = d.getVar('LICENSE')
326
327 return incompatible_pkg_license(d, dont_want_licenses, license)
328
329def check_license_flags(d):
330 """
331 This function checks if a recipe has any LICENSE_FLAGS that
332 aren't acceptable.
333
334    If it does, it returns all the LICENSE_FLAGS missing from the list
335 of acceptable license flags, or all of the LICENSE_FLAGS if there
336 is no list of acceptable flags.
337
338    If everything is acceptable, it returns None.
339 """
340
341 def license_flag_matches(flag, acceptlist, pn):
342 """
343 Return True if flag matches something in acceptlist, None if not.
344
345 Before we test a flag against the acceptlist, we append _${PN}
346 to it. We then try to match that string against the
347 acceptlist. This covers the normal case, where we expect
348 LICENSE_FLAGS to be a simple string like 'commercial', which
349 the user typically matches exactly in the acceptlist by
350        explicitly appending the package name, e.g. 'commercial_foo'.
351 If we fail the match however, we then split the flag across
352 '_' and append each fragment and test until we either match or
353 run out of fragments.
354 """
355 flag_pn = ("%s_%s" % (flag, pn))
356 for candidate in acceptlist:
357 if flag_pn == candidate:
358 return True
359
360 flag_cur = ""
361 flagments = flag_pn.split("_")
362 flagments.pop() # we've already tested the full string
363 for flagment in flagments:
364 if flag_cur:
365 flag_cur += "_"
366 flag_cur += flagment
367 for candidate in acceptlist:
368 if flag_cur == candidate:
369 return True
370 return False
371
372 def all_license_flags_match(license_flags, acceptlist):
373 """ Return all unmatched flags, None if all flags match """
374 pn = d.getVar('PN')
375 split_acceptlist = acceptlist.split()
376 flags = []
377 for flag in license_flags.split():
378 if not license_flag_matches(flag, split_acceptlist, pn):
379 flags.append(flag)
380 return flags if flags else None
381
382 license_flags = d.getVar('LICENSE_FLAGS')
383 if license_flags:
384 acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
385 if not acceptlist:
386 return license_flags.split()
387 unmatched_flags = all_license_flags_match(license_flags, acceptlist)
388 if unmatched_flags:
389 return unmatched_flags
390 return None
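# An illustrative (hypothetical) pairing: a recipe "foo" declaring
#   LICENSE_FLAGS = "commercial"
# is accepted if local.conf contains either the exact per-recipe form
#   LICENSE_FLAGS_ACCEPTED = "commercial_foo"
# or the broader fragment match
#   LICENSE_FLAGS_ACCEPTED = "commercial"
# With neither present, check_license_flags() returns ["commercial"] and the
# recipe will normally be skipped.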
391
392def check_license_format(d):
393 """
394    This function checks if LICENSE is well defined and
395    validates the operators used between licenses.
396    Licenses must be joined by valid operators, not bare spaces.
397 """
398 pn = d.getVar('PN')
399 licenses = d.getVar('LICENSE')
400 from oe.license import license_operator, license_operator_chars, license_pattern
401
402 elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
403 for pos, element in enumerate(elements):
404 if license_pattern.match(element):
405 if pos > 0 and license_pattern.match(elements[pos - 1]):
406 oe.qa.handle_error('license-format',
407 '%s: LICENSE value "%s" has an invalid format - license names ' \
408 'must be separated by the following characters to indicate ' \
409 'the license selection: %s' %
410 (pn, licenses, license_operator_chars), d)
411 elif not license_operator.match(element):
412 oe.qa.handle_error('license-format',
413 '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
414 'in the valid list of separators (%s)' %
415 (pn, licenses, element, license_operator_chars), d)
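# For example, LICENSE = "GPL-2.0-only & MIT" and LICENSE = "GPL-2.0-only | MIT"
# are well formed, whereas LICENSE = "GPL-2.0-only MIT" (two names with no
# operator between them) triggers the "license-format" QA error above.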
416
417SSTATETASKS += "do_populate_lic"
418do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
419do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
420
421IMAGE_CLASSES:append = " license_image"
422
423python do_populate_lic_setscene () {
424 sstate_setscene(d)
425}
426addtask do_populate_lic_setscene
diff --git a/meta/classes-global/logging.bbclass b/meta/classes-global/logging.bbclass
new file mode 100644
index 0000000000..ce03abfe42
--- /dev/null
+++ b/meta/classes-global/logging.bbclass
@@ -0,0 +1,107 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# The following logging mechanisms are to be used in bash functions of recipes.
8# They are intended to map one to one in intention and output format with the
9# python recipe logging functions of a similar naming convention: bb.plain(),
10# bb.note(), etc.
11
12LOGFIFO = "${T}/fifo.${@os.getpid()}"
13
14# Print the output exactly as it is passed in. Typically used for output of
15# tasks that should be seen on the console. Use sparingly.
16# Output: logs console
17bbplain() {
18 if [ -p ${LOGFIFO} ] ; then
19 printf "%b\0" "bbplain $*" > ${LOGFIFO}
20 else
21 echo "$*"
22 fi
23}
24
25# Notify the user of a noteworthy condition.
26# Output: logs
27bbnote() {
28 if [ -p ${LOGFIFO} ] ; then
29 printf "%b\0" "bbnote $*" > ${LOGFIFO}
30 else
31 echo "NOTE: $*"
32 fi
33}
34
35# Print a warning to the log. Warnings are non-fatal, and do not
36# indicate a build failure.
37# Output: logs console
38bbwarn() {
39 if [ -p ${LOGFIFO} ] ; then
40 printf "%b\0" "bbwarn $*" > ${LOGFIFO}
41 else
42 echo "WARNING: $*"
43 fi
44}
45
46# Print an error to the log. Errors are non-fatal in that the build can
47# continue, but they do indicate a build failure.
48# Output: logs console
49bberror() {
50 if [ -p ${LOGFIFO} ] ; then
51 printf "%b\0" "bberror $*" > ${LOGFIFO}
52 else
53 echo "ERROR: $*"
54 fi
55}
56
57# Print a fatal error to the log. Fatal errors indicate build failure
58# and halt the build, exiting with an error code.
59# Output: logs console
60bbfatal() {
61 if [ -p ${LOGFIFO} ] ; then
62 printf "%b\0" "bbfatal $*" > ${LOGFIFO}
63 else
64 echo "ERROR: $*"
65 fi
66 exit 1
67}
68
69# Like bbfatal, except prevents the suppression of the error log by
70# bitbake's UI.
71# Output: logs console
72bbfatal_log() {
73 if [ -p ${LOGFIFO} ] ; then
74 printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
75 else
76 echo "ERROR: $*"
77 fi
78 exit 1
79}
80
81# Print debug messages. These are appropriate for progress checkpoint
82# messages to the logs. Depending on the debug log level, they may also
83# go to the console.
84# Output: logs console
85# Usage: bbdebug 1 "first level debug message"
86# bbdebug 2 "second level debug message"
87bbdebug() {
88 USAGE='Usage: bbdebug [123] "message"'
89 if [ $# -lt 2 ]; then
90 bbfatal "$USAGE"
91 fi
92
93 # Strip off the debug level and ensure it is an integer
94 DBGLVL=$1; shift
95 NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
96 if [ "$NONDIGITS" ]; then
97 bbfatal "$USAGE"
98 fi
99
100 # All debug output is printed to the logs
101 if [ -p ${LOGFIFO} ] ; then
102 printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
103 else
104 echo "DEBUG: $*"
105 fi
106}
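
# A minimal usage sketch inside a (hypothetical) recipe shell task:
#
#   do_install:append() {
#       bbnote "installing extra docs"
#       [ -d "${D}${docdir}" ] || bbfatal "docdir was not created"
#       bbdebug 2 "install step finished"
#   }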
107
diff --git a/meta/classes-global/mirrors.bbclass b/meta/classes-global/mirrors.bbclass
new file mode 100644
index 0000000000..9643b31a23
--- /dev/null
+++ b/meta/classes-global/mirrors.bbclass
@@ -0,0 +1,95 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7MIRRORS += "\
8${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
9${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
10${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
11${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
12${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
13${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
14${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
15${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
16${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
17${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
18${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
19${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
20${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
21${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
22${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
23${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
24${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
25${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
26${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
27${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
28${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
29${GNU_MIRROR} https://mirrors.kernel.org/gnu \
30${KERNELORG_MIRROR} http://www.kernel.org/pub \
31${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
32${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
33${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
34ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
35ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
36ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
37ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
38http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
39http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
40${APACHE_MIRROR} http://www.us.apache.org/dist \
41${APACHE_MIRROR} http://archive.apache.org/dist \
42http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
43${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
44${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
45ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
46ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
47ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
48cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
49svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
50git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
51gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
52hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
53bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
54p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
55osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
56https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
57ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
58npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
59cvs://.*/.* http://sources.openembedded.org/ \
60svn://.*/.* http://sources.openembedded.org/ \
61git://.*/.* http://sources.openembedded.org/ \
62gitsm://.*/.* http://sources.openembedded.org/ \
63hg://.*/.* http://sources.openembedded.org/ \
64bzr://.*/.* http://sources.openembedded.org/ \
65p4://.*/.* http://sources.openembedded.org/ \
66osc://.*/.* http://sources.openembedded.org/ \
67https?://.*/.* http://sources.openembedded.org/ \
68ftp://.*/.* http://sources.openembedded.org/ \
69npm://.*/?.* http://sources.openembedded.org/ \
70${CPAN_MIRROR} http://cpan.metacpan.org/ \
71${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
72https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
73https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
74"
75
76# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
77# where git native protocol fetches may fail due to local firewall rules, etc.
78
79MIRRORS += "\
80git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
81git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
82git://.*/.* git://HOST/PATH;protocol=https \
83git://.*/.* git://HOST/git/PATH;protocol=https \
84"
85
86# Switch glibc and binutils recipes to use shallow clones as they're large and this
87# improves user experience whilst allowing the flexibility of git URLs in the recipes.
88BB_GIT_SHALLOW:pn-binutils = "1"
89BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
90BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
91BB_GIT_SHALLOW:pn-binutils-cross-testsuite = "1"
92BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
93BB_GIT_SHALLOW:pn-glibc = "1"
94PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
95 git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"
diff --git a/meta/classes-global/package.bbclass b/meta/classes-global/package.bbclass
new file mode 100644
index 0000000000..418400da8c
--- /dev/null
+++ b/meta/classes-global/package.bbclass
@@ -0,0 +1,2558 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Packaging process
9#
10# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
11# taking D and splitting it up into the packages listed in PACKAGES, placing the
12# resulting output in PKGDEST.
13#
14# There are the following default steps but PACKAGEFUNCS can be extended:
15#
16# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
17#
18# b) perform_packagecopy - Copy D into PKGD
19#
20# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
21#
22# d) split_and_strip_files - split the files into runtime and debug and strip them.
23# Debug files include debug info split, and associated sources that end up in -dbg packages
24#
25# e) fixup_perms - Fix up permissions in the package before we split it.
26#
27# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
28# Also triggers the binary stripping code to put files in -dbg packages.
29#
30# g) package_do_filedeps - Collect per-file run-time dependency metadata
31# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
32# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
33#
34# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
35# dependencies found. Also stores the package name so anyone else using this library
36# knows which package to depend on.
37#
38# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
39#
40# j) read_shlibdeps - Reads the stored shlibs information into the metadata
41#
42# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
43#
44# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
45# packaging steps
46
47inherit packagedata
48inherit chrpath
49inherit package_pkgdata
50inherit insane
51
52PKGD = "${WORKDIR}/package"
53PKGDEST = "${WORKDIR}/packages-split"
54
55LOCALE_SECTION ?= ''
56
57ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
58
59# rpm is used for the per-file dependency identification
60# dwarfsrcfiles is used to determine the list of debug source files
61PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
62
63
64# If your postinstall can execute at rootfs creation time rather than on
65# target but depends on a native/cross tool in order to execute, you need to
66# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
67# in the package dependencies as normal, this is just for native/cross support
68# tools at rootfs build time.
69PACKAGE_WRITE_DEPS ??= ""
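#
# A hedged example: if a (hypothetical) postinst runs a native helper
# "mytool" while the rootfs is assembled, the recipe would declare:
#   PACKAGE_WRITE_DEPS += "mytool-native"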
70
71def legitimize_package_name(s):
72 """
73 Make sure package names are legitimate strings
74 """
75 import re
76
77 def fixutf(m):
78 cp = m.group(1)
79 if cp:
80 return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
81
82 # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
83 s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
84
85 # Remaining package name validity fixes
86 return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
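
# For example:
#   legitimize_package_name("Foo_Bar@1,2") -> "foo-bar+1+2"
#   legitimize_package_name("<U00E9>tude") -> "étude" (codepoint decoded)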
87
88def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
89 """
90 Used in .bb files to split up dynamically generated subpackages of a
91 given package, usually plugins or modules.
92
93 Arguments:
94 root -- the path in which to search
95 file_regex -- regular expression to match searched files. Use
96 parentheses () to mark the part of this expression
97 that should be used to derive the module name (to be
98 substituted where %s is used in other function
99 arguments as noted below)
100 output_pattern -- pattern to use for the package names. Must include %s.
101 description -- description to set for each package. Must include %s.
102 postinst -- postinstall script to use for all packages (as a
103 string)
104 recursive -- True to perform a recursive search - default False
105 hook -- a hook function to be called for every match. The
106 function will be called with the following arguments
107 (in the order listed):
108 f: full path to the file/directory match
109 pkg: the package name
110 file_regex: as above
111 output_pattern: as above
112 modulename: the module name derived using file_regex
113 extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
114 all packages. The default value of None causes a
115 dependency on the main package (${PN}) - if you do
116 not want this, pass '' for this parameter.
117 aux_files_pattern -- extra item(s) to be added to FILES for each
118 package. Can be a single string item or a list of
119 strings for multiple items. Must include %s.
120 postrm -- postrm script to use for all packages (as a string)
121 allow_dirs -- True allow directories to be matched - default False
122 prepend -- if True, prepend created packages to PACKAGES instead
123 of the default False which appends them
124 match_path -- match file_regex on the whole relative path to the
125 root rather than just the file name
126 aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
127 each package, using the actual derived module name
128 rather than converting it to something legal for a
129 package name. Can be a single string item or a list
130 of strings for multiple items. Must include %s.
131 allow_links -- True to allow symlinks to be matched - default False
132 summary -- Summary to set for each package. Must include %s;
133 defaults to description if not set.
134
135 """
136
137 dvar = d.getVar('PKGD')
138 root = d.expand(root)
139 output_pattern = d.expand(output_pattern)
140 extra_depends = d.expand(extra_depends)
141
142 # If the root directory doesn't exist, don't error out later but silently do
143 # no splitting.
144 if not os.path.exists(dvar + root):
145 return []
146
147 ml = d.getVar("MLPREFIX")
148 if ml:
149 if not output_pattern.startswith(ml):
150 output_pattern = ml + output_pattern
151
152 newdeps = []
153 for dep in (extra_depends or "").split():
154 if dep.startswith(ml):
155 newdeps.append(dep)
156 else:
157 newdeps.append(ml + dep)
158 if newdeps:
159 extra_depends = " ".join(newdeps)
160
161
162 packages = d.getVar('PACKAGES').split()
163 split_packages = set()
164
165 if postinst:
166 postinst = '#!/bin/sh\n' + postinst + '\n'
167 if postrm:
168 postrm = '#!/bin/sh\n' + postrm + '\n'
169 if not recursive:
170 objs = os.listdir(dvar + root)
171 else:
172 objs = []
173 for walkroot, dirs, files in os.walk(dvar + root):
174 for file in files:
175 relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
176 if relpath:
177 objs.append(relpath)
178
179    if extra_depends is None:
180 extra_depends = d.getVar("PN")
181
182 if not summary:
183 summary = description
184
185 for o in sorted(objs):
186 import re, stat
187 if match_path:
188 m = re.match(file_regex, o)
189 else:
190 m = re.match(file_regex, os.path.basename(o))
191
192 if not m:
193 continue
194 f = os.path.join(dvar + root, o)
195 mode = os.lstat(f).st_mode
196 if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
197 continue
198 on = legitimize_package_name(m.group(1))
199 pkg = output_pattern % on
200 split_packages.add(pkg)
201        if pkg not in packages:
202 if prepend:
203 packages = [pkg] + packages
204 else:
205 packages.append(pkg)
206 oldfiles = d.getVar('FILES:' + pkg)
207 newfile = os.path.join(root, o)
208 # These names will be passed through glob() so if the filename actually
209 # contains * or ? (rare, but possible) we need to handle that specially
210 newfile = newfile.replace('*', '[*]')
211 newfile = newfile.replace('?', '[?]')
212 if not oldfiles:
213 the_files = [newfile]
214 if aux_files_pattern:
215 if type(aux_files_pattern) is list:
216 for fp in aux_files_pattern:
217 the_files.append(fp % on)
218 else:
219 the_files.append(aux_files_pattern % on)
220 if aux_files_pattern_verbatim:
221 if type(aux_files_pattern_verbatim) is list:
222 for fp in aux_files_pattern_verbatim:
223 the_files.append(fp % m.group(1))
224 else:
225 the_files.append(aux_files_pattern_verbatim % m.group(1))
226 d.setVar('FILES:' + pkg, " ".join(the_files))
227 else:
228 d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
229 if extra_depends != '':
230 d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
231 if not d.getVar('DESCRIPTION:' + pkg):
232 d.setVar('DESCRIPTION:' + pkg, description % on)
233 if not d.getVar('SUMMARY:' + pkg):
234 d.setVar('SUMMARY:' + pkg, summary % on)
235 if postinst:
236 d.setVar('pkg_postinst:' + pkg, postinst)
237 if postrm:
238 d.setVar('pkg_postrm:' + pkg, postrm)
239 if callable(hook):
240 hook(f, pkg, file_regex, output_pattern, m.group(1))
241
242 d.setVar('PACKAGES', ' '.join(packages))
243 return list(split_packages)
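
# A minimal usage sketch for do_split_packages() (hypothetical recipe, all
# names and paths assumed), generating one package per plugin library:
#
#   python populate_packages:prepend () {
#       plugindir = d.expand('${libdir}/myapp/plugins')
#       do_split_packages(d, plugindir, r'^libmyplugin-(.*)\.so$',
#                         output_pattern='myapp-plugin-%s',
#                         description='MyApp plugin for %s',
#                         extra_depends='')
#   }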
244
245PACKAGE_DEPENDS += "file-native"
246
247python () {
248 if d.getVar('PACKAGES') != '':
249 deps = ""
250 for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
251 deps += " %s:do_populate_sysroot" % dep
252 if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
253 deps += ' xz-native:do_populate_sysroot'
254 d.appendVarFlag('do_package', 'depends', deps)
255
256 # shlibs requires any DEPENDS to have already packaged for the *.list files
257 d.appendVarFlag('do_package', 'deptask', " do_packagedata")
258}
259
260# Get a list of files from file vars by searching files under current working directory
261# The list contains symlinks, directories and normal files.
262def files_from_filevars(filevars):
263    import os, glob
264 cpath = oe.cachedpath.CachedPath()
265 files = []
266 for f in filevars:
267 if os.path.isabs(f):
268 f = '.' + f
269 if not f.startswith("./"):
270 f = './' + f
271 globbed = glob.glob(f)
272 if globbed:
273 if [ f ] != globbed:
274 files += globbed
275 continue
276 files.append(f)
277
278 symlink_paths = []
279 for ind, f in enumerate(files):
280 # Handle directory symlinks. Truncate path to the lowest level symlink
281 parent = ''
282 for dirname in f.split('/')[:-1]:
283 parent = os.path.join(parent, dirname)
284 if dirname == '.':
285 continue
286 if cpath.islink(parent):
287 bb.warn("FILES contains file '%s' which resides under a "
288 "directory symlink. Please fix the recipe and use the "
289 "real path for the file." % f[1:])
290 symlink_paths.append(f)
291 files[ind] = parent
292 f = parent
293 break
294
295 if not cpath.islink(f):
296 if cpath.isdir(f):
297 newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
298 if newfiles:
299 files += newfiles
300
301 return files, symlink_paths
302
303# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
304def get_conffiles(pkg, d):
305 pkgdest = d.getVar('PKGDEST')
306 root = os.path.join(pkgdest, pkg)
307 cwd = os.getcwd()
308 os.chdir(root)
309
310    conffiles = d.getVar('CONFFILES:%s' % pkg)
311    if conffiles is None:
312        conffiles = d.getVar('CONFFILES')
313    if conffiles is None:
314        conffiles = ""
315 conffiles = conffiles.split()
316 conf_orig_list = files_from_filevars(conffiles)[0]
317
318 # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
319 conf_list = []
320 for f in conf_orig_list:
321 if os.path.isdir(f):
322 continue
323 if os.path.islink(f):
324 continue
325 if not os.path.exists(f):
326 continue
327 conf_list.append(f)
328
329 # Remove the leading './'
330 for i in range(0, len(conf_list)):
331 conf_list[i] = conf_list[i][1:]
332
333 os.chdir(cwd)
334 return conf_list
335
336def checkbuildpath(file, d):
337 tmpdir = d.getVar('TMPDIR')
338 with open(file) as f:
339 file_content = f.read()
340 if tmpdir in file_content:
341 return True
342
343 return False
344
345def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
346 debugfiles = {}
347
348 for line in dwarfsrcfiles_output.splitlines():
349 if line.startswith("\t"):
350 debugfiles[os.path.normpath(line.split()[0])] = ""
351
352 return debugfiles.keys()
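
# For reference, the dwarfsrcfiles output parsed above looks roughly like
# this (illustrative; source entries are tab-prefixed):
#   /path/to/binary
#       /usr/src/debug/foo/1.0/foo.c
#       /usr/src/debug/foo/1.0/bar.c
# i.e. only the tab-prefixed source file entries are collected.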
353
354def source_info(file, d, fatal=True):
355 import subprocess
356
357 cmd = ["dwarfsrcfiles", file]
358 try:
359 output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
360 retval = 0
361 except subprocess.CalledProcessError as exc:
362 output = exc.output
363 retval = exc.returncode
364
365 # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
366 if retval != 0 and retval != 255:
367 msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
368 if fatal:
369 bb.fatal(msg)
370 bb.note(msg)
371
372 debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
373
374 return list(debugsources)
375
376def splitdebuginfo(file, dvar, dv, d):
377    # Function to split a single file into two components: one is the stripped
378 # target system binary, the other contains any debugging information. The
379 # two files are linked to reference each other.
380 #
381 # return a mapping of files:debugsources
382
383 import stat
384 import subprocess
385
386 src = file[len(dvar):]
387 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
388 debugfile = dvar + dest
389 sources = []
390
391 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
392 if oe.package.is_kernel_module_signed(file):
393 bb.debug(1, "Skip strip on signed module %s" % file)
394 return (file, sources)
395
396 # Split the file...
397 bb.utils.mkdirhier(os.path.dirname(debugfile))
398 #bb.note("Split %s -> %s" % (file, debugfile))
399 # Only store off the hard link reference if we successfully split!
400
401 dvar = d.getVar('PKGD')
402 objcopy = d.getVar("OBJCOPY")
403
404 newmode = None
405    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
406 origmode = os.stat(file)[stat.ST_MODE]
407 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
408 os.chmod(file, newmode)
409
410 # We need to extract the debug src information here...
411 if dv["srcdir"]:
412 sources = source_info(file, d)
413
414 bb.utils.mkdirhier(os.path.dirname(debugfile))
415
416 subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
417
418 # Set the debuglink to have the view of the file path on the target
419 subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
420
421 if newmode:
422 os.chmod(file, origmode)
423
424 return (file, sources)
425
426def splitstaticdebuginfo(file, dvar, dv, d):
427    # Unlike the function above, there is no way to split a static library into
428    # two components. So to get similar results we will copy the unmodified
429 # static library (containing the debug symbols) into a new directory.
430 # We will then strip (preserving symbols) the static library in the
431 # typical location.
432 #
433 # return a mapping of files:debugsources
434
435    import stat, shutil  # shutil is needed for the copy2() call below
436
437 src = file[len(dvar):]
438 dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
439 debugfile = dvar + dest
440 sources = []
441
442 # Copy the file...
443 bb.utils.mkdirhier(os.path.dirname(debugfile))
444 #bb.note("Copy %s -> %s" % (file, debugfile))
445
446 dvar = d.getVar('PKGD')
447
448 newmode = None
449    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
450 origmode = os.stat(file)[stat.ST_MODE]
451 newmode = origmode | stat.S_IWRITE | stat.S_IREAD
452 os.chmod(file, newmode)
453
454 # We need to extract the debug src information here...
455 if dv["srcdir"]:
456 sources = source_info(file, d)
457
458 bb.utils.mkdirhier(os.path.dirname(debugfile))
459
460 # Copy the unmodified item to the debug directory
461 shutil.copy2(file, debugfile)
462
463 if newmode:
464 os.chmod(file, origmode)
465
466 return (file, sources)
467
468def inject_minidebuginfo(file, dvar, dv, d):
469 # Extract just the symbols from debuginfo into minidebuginfo,
470 # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
471 # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
472
473 import subprocess
474
475 readelf = d.getVar('READELF')
476 nm = d.getVar('NM')
477 objcopy = d.getVar('OBJCOPY')
478
479 minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
480
481 src = file[len(dvar):]
482 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
483 debugfile = dvar + dest
484 minidebugfile = minidebuginfodir + src + '.minidebug'
485 bb.utils.mkdirhier(os.path.dirname(minidebugfile))
486
487 # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
488 # so skip it.
489 if not os.path.exists(debugfile):
490 bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
491 return
492
493 # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
494 # We will exclude all of these from minidebuginfo to save space.
495 remove_section_names = []
496 for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
497 fields = line.split()
498 if len(fields) < 8:
499 continue
500 name = fields[0]
501 type = fields[1]
502 flags = fields[7]
503 # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
504 if name.startswith('.debug_'):
505 continue
506 if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
507 remove_section_names.append(name)
508
509 # List dynamic symbols in the binary. We can exclude these from minidebuginfo
510 # because they are always present in the binary.
511 dynsyms = set()
512 for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
513 dynsyms.add(line.split()[0])
514
515 # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
516 # These are the ones we want to keep in minidebuginfo.
517 keep_symbols_file = minidebugfile + '.symlist'
518 found_any_symbols = False
519 with open(keep_symbols_file, 'w') as f:
520 for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
521 fields = line.split('|')
522 if len(fields) < 7:
523 continue
524 name = fields[0].strip()
525 type = fields[3].strip()
526 if type == 'FUNC' and name not in dynsyms:
527 f.write('{}\n'.format(name))
528 found_any_symbols = True
529
530 if not found_any_symbols:
531 bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
532 return
533
534 bb.utils.remove(minidebugfile)
535 bb.utils.remove(minidebugfile + '.xz')
536
537 subprocess.check_call([objcopy, '-S'] +
538 ['--remove-section={}'.format(s) for s in remove_section_names] +
539 ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
540
541 subprocess.check_call(['xz', '--keep', minidebugfile])
542
543 subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
544
545def copydebugsources(debugsrcdir, sources, d):
546 # The debug src information written out to sourcefile is further processed
547 # and copied to the destination here.
548
549 import stat
550 import subprocess
551
552 if debugsrcdir and sources:
553 sourcefile = d.expand("${WORKDIR}/debugsources.list")
554 bb.utils.remove(sourcefile)
555
556 # filenames are null-separated - this is an artefact of the previous use
557 # of rpm's debugedit, which was writing them out that way, and the code elsewhere
558 # is still assuming that.
559 debuglistoutput = '\0'.join(sources) + '\0'
560 with open(sourcefile, 'a') as sf:
561 sf.write(debuglistoutput)
562
563 dvar = d.getVar('PKGD')
564 strip = d.getVar("STRIP")
565 objcopy = d.getVar("OBJCOPY")
566 workdir = d.getVar("WORKDIR")
567 sdir = d.getVar("S")
568 sparentdir = os.path.dirname(os.path.dirname(sdir))
569 sbasedir = os.path.basename(os.path.dirname(sdir)) + "/" + os.path.basename(sdir)
570 workparentdir = os.path.dirname(os.path.dirname(workdir))
571 workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
572
573        # If S isn't based on WORKDIR we can infer our sources are located elsewhere,
574 # e.g. using externalsrc; use S as base for our dirs
575 if workdir in sdir or 'work-shared' in sdir:
576 basedir = workbasedir
577 parentdir = workparentdir
578 else:
579 basedir = sbasedir
580 parentdir = sparentdir
581
582 # If build path exists in sourcefile, it means toolchain did not use
583 # -fdebug-prefix-map to compile
584 if checkbuildpath(sourcefile, d):
585 localsrc_prefix = parentdir + "/"
586 else:
587 localsrc_prefix = "/usr/src/debug/"
588
589 nosuchdir = []
590 basepath = dvar
591 for p in debugsrcdir.split("/"):
592 basepath = basepath + "/" + p
593 if not cpath.exists(basepath):
594 nosuchdir.append(basepath)
595 bb.utils.mkdirhier(basepath)
596 cpath.updatecache(basepath)
597
598 # Ignore files from the recipe sysroots (target and native)
599 processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | "
600 # We need to ignore files that are not actually ours
601 # we do this by only paying attention to items from this package
602 processdebugsrc += "fgrep -zw '%s' | "
603 # Remove prefix in the source paths
604 processdebugsrc += "sed 's#%s##g' | "
605 processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
606
607 cmd = processdebugsrc % (sourcefile, basedir, localsrc_prefix, parentdir, dvar, debugsrcdir)
608 try:
609 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
610 except subprocess.CalledProcessError:
611 # Can "fail" if internal headers/transient sources are attempted
612 pass
613
614 # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
615 # Work around this by manually finding and copying any symbolic links that made it through.
616 cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
617 (dvar, debugsrcdir, dvar, debugsrcdir, parentdir, dvar, debugsrcdir)
618 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
619
620
621        # debugsources.list may be polluted from the host if we used externalsrc;
622        # cpio uses copy-pass and may have just created a directory structure
623        # matching the one from the host. If that's the case, move those files to
624        # debugsrcdir to avoid host contamination.
625 # Empty dir structure will be deleted in the next step.
626
627 # Same check as above for externalsrc
628 if workdir not in sdir:
629 if os.path.exists(dvar + debugsrcdir + sdir):
630 cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
631 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
632
633 # The copy by cpio may have resulted in some empty directories! Remove these
634 cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
635 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
636
637        # Also remove debugsrcdir if it's empty
638 for p in nosuchdir[::-1]:
639 if os.path.exists(p) and not os.listdir(p):
640 os.rmdir(p)
641
642#
643# Package data handling routines
644#
645
646def get_package_mapping (pkg, basepkg, d, depversions=None):
647 import oe.packagedata
648
649 data = oe.packagedata.read_subpkgdata(pkg, d)
650 key = "PKG:%s" % pkg
651
652 if key in data:
653 if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
654 bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
655 # Have to avoid undoing the write_extra_pkgs(global_variants...)
656 if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
657 and data[key] == basepkg:
658 return pkg
659 if depversions == []:
660 # Avoid returning a mapping if the renamed package rprovides its original name
661 rprovkey = "RPROVIDES:%s" % pkg
662 if rprovkey in data:
663 if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
664 bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
665 return pkg
666 # Do map to rewritten package name
667 return data[key]
668
669 return pkg
670
671def get_package_additional_metadata (pkg_type, d):
672 base_key = "PACKAGE_ADD_METADATA"
673 for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
674 if d.getVar(key, False) is None:
675 continue
676 d.setVarFlag(key, "type", "list")
677 if d.getVarFlag(key, "separator") is None:
678 d.setVarFlag(key, "separator", "\\n")
679 metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
680 return "\n".join(metadata_fields).strip()
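
# A hedged configuration example (hypothetical values): adding a vendor field
# only to ipk packages could be done with
#   PACKAGE_ADD_METADATA_IPK = "Vendor: Example Corp"
# The packaging-type-specific variable wins over plain PACKAGE_ADD_METADATA,
# as the loop above checks it first.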
681
682def runtime_mapping_rename (varname, pkg, d):
683 #bb.note("%s before: %s" % (varname, d.getVar(varname)))
684
685 new_depends = {}
686 deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
687 for depend, depversions in deps.items():
688 new_depend = get_package_mapping(depend, pkg, d, depversions)
689 if depend != new_depend:
690 bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
691 new_depends[new_depend] = deps[depend]
692
693 d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
694
695 #bb.note("%s after: %s" % (varname, d.getVar(varname)))
696
697#
698# Used by do_packagedata (and possibly other routines post do_package)
699#
700
701PRSERV_ACTIVE = "${@bool(d.getVar("PRSERV_HOST"))}"
702PRSERV_ACTIVE[vardepvalue] = "${PRSERV_ACTIVE}"
703package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
704package_get_auto_pr[vardeps] += "PRSERV_ACTIVE"
705python package_get_auto_pr() {
706 import oe.prservice
707
708 def get_do_package_hash(pn):
709 if d.getVar("BB_RUNTASK") != "do_package":
710 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
711 for dep in taskdepdata:
712 if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
713 return taskdepdata[dep][6]
714 return None
715
716 # Support per recipe PRSERV_HOST
717 pn = d.getVar('PN')
718 host = d.getVar("PRSERV_HOST_" + pn)
719    if host is not None:
720 d.setVar("PRSERV_HOST", host)
721
722 pkgv = d.getVar("PKGV")
723
724 # PR Server not active, handle AUTOINC
725 if not d.getVar('PRSERV_HOST'):
726 d.setVar("PRSERV_PV_AUTOINC", "0")
727 return
728
729 auto_pr = None
730 pv = d.getVar("PV")
731 version = d.getVar("PRAUTOINX")
732 pkgarch = d.getVar("PACKAGE_ARCH")
733 checksum = get_do_package_hash(pn)
734
735 # If do_package isn't in the dependencies, we can't get the checksum...
736 if not checksum:
737 bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
738 #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
739 #for dep in taskdepdata:
740 # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
741 return
742
743 if d.getVar('PRSERV_LOCKDOWN'):
744 auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
745 if auto_pr is None:
746 bb.fatal("Can NOT get PRAUTO from lockdown exported file")
747 d.setVar('PRAUTO',str(auto_pr))
748 return
749
750 try:
751 conn = oe.prservice.prserv_make_conn(d)
752 if conn is not None:
753 if "AUTOINC" in pkgv:
754 srcpv = bb.fetch2.get_srcrev(d)
755 base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
756 value = conn.getPR(base_ver, pkgarch, srcpv)
757 d.setVar("PRSERV_PV_AUTOINC", str(value))
758
759 auto_pr = conn.getPR(version, pkgarch, checksum)
760 conn.close()
761 except Exception as e:
762 bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
763 if auto_pr is None:
764 bb.fatal("Can NOT get PRAUTO from remote PR service")
765 d.setVar('PRAUTO',str(auto_pr))
766}
767
768#
769# Package functions suitable for inclusion in PACKAGEFUNCS
770#
771
772python package_convert_pr_autoinc() {
773 pkgv = d.getVar("PKGV")
774
775 # Adjust pkgv as necessary...
776 if 'AUTOINC' in pkgv:
777 d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
778
779 # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
780 d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
781 d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
782}
783
784LOCALEBASEPN ??= "${PN}"
785
786python package_do_split_locales() {
787 if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
788 bb.debug(1, "package requested not splitting locales")
789 return
790
791 packages = (d.getVar('PACKAGES') or "").split()
792
793 datadir = d.getVar('datadir')
794 if not datadir:
795 bb.note("datadir not defined")
796 return
797
798 dvar = d.getVar('PKGD')
799 pn = d.getVar('LOCALEBASEPN')
800
801 if pn + '-locale' in packages:
802 packages.remove(pn + '-locale')
803
804 localedir = os.path.join(dvar + datadir, 'locale')
805
806 if not cpath.isdir(localedir):
807 bb.debug(1, "No locale files in this package")
808 return
809
810 locales = os.listdir(localedir)
811
812 summary = d.getVar('SUMMARY') or pn
813 description = d.getVar('DESCRIPTION') or ""
814 locale_section = d.getVar('LOCALE_SECTION')
815 mlprefix = d.getVar('MLPREFIX') or ""
816 for l in sorted(locales):
817 ln = legitimize_package_name(l)
818 pkg = pn + '-locale-' + ln
819 packages.append(pkg)
820 d.setVar('FILES:' + pkg, os.path.join(datadir, 'locale', l))
821 d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
822 d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
823 d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
824 d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
825 if locale_section:
826 d.setVar('SECTION:' + pkg, locale_section)
827
828 d.setVar('PACKAGES', ' '.join(packages))
829
830 # Disabled by RP 18/06/07
831 # Wildcards aren't supported in debian
832 # They break with ipkg since glibc-locale* will mean that
833 # glibc-localedata-translit* won't install as a dependency
834 # for some other package which breaks meta-toolchain
835 # Probably breaks since virtual-locale- isn't provided anywhere
836 #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
837 #rdep.append('%s-locale*' % pn)
838 #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
839}
840
841python perform_packagecopy () {
842 import subprocess
843 import shutil
844
845 dest = d.getVar('D')
846 dvar = d.getVar('PKGD')
847
848    # Start package population by taking a copy of the installed
849    # files to operate on.
850    # Preserve sparse files and hard links.
851 cmd = 'tar --exclude=./sysroot-only -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
852 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
853
854 # replace RPATHs for the nativesdk binaries, to make them relocatable
855 if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
856 rpath_replace (dvar, d)
857}
858perform_packagecopy[cleandirs] = "${PKGD}"
859perform_packagecopy[dirs] = "${PKGD}"
860
861# We generate a master list of directories to process: we start by
862# seeding this list with reasonable defaults, then load from
863# the fs-perms.txt files.
864python fixup_perms () {
865 import pwd, grp
866
867 # init using a string with the same format as a line as documented in
868 # the fs-perms.txt file
869 # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
870 # <path> link <link target>
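    #
    # e.g. (illustrative entries, fs-perms.txt style):
    #   /usr/src              0755 root root false - - -
    #   ${localstatedir}/run  link /run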
871 #
872 # __str__ can be used to print out an entry in the input format
873 #
874 # if fs_perms_entry.path is None:
875 # an error occurred
876 # if fs_perms_entry.link, you can retrieve:
877 # fs_perms_entry.path = path
878 # fs_perms_entry.link = target of link
879 # if not fs_perms_entry.link, you can retrieve:
880 # fs_perms_entry.path = path
881 # fs_perms_entry.mode = expected dir mode or None
882 # fs_perms_entry.uid = expected uid or -1
883 # fs_perms_entry.gid = expected gid or -1
884 # fs_perms_entry.walk = 'true' or something else
885 # fs_perms_entry.fmode = expected file mode or None
886 # fs_perms_entry.fuid = expected file uid or -1
887    #      fs_perms_entry.fgid = expected file gid or -1
888 class fs_perms_entry():
889 def __init__(self, line):
890 lsplit = line.split()
891 if len(lsplit) == 3 and lsplit[1].lower() == "link":
892 self._setlink(lsplit[0], lsplit[2])
893 elif len(lsplit) == 8:
894 self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
895 else:
896 msg = "Fixup Perms: invalid config line %s" % line
897 oe.qa.handle_error("perm-config", msg, d)
898 self.path = None
899 self.link = None
900
901 def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
902 self.path = os.path.normpath(path)
903 self.link = None
904 self.mode = self._procmode(mode)
905 self.uid = self._procuid(uid)
906 self.gid = self._procgid(gid)
907 self.walk = walk.lower()
908 self.fmode = self._procmode(fmode)
909 self.fuid = self._procuid(fuid)
910 self.fgid = self._procgid(fgid)
911
912 def _setlink(self, path, link):
913 self.path = os.path.normpath(path)
914 self.link = link
915
916 def _procmode(self, mode):
917            if not mode or mode == "-":
918 return None
919 else:
920 return int(mode,8)
921
922 # Note uid/gid -1 has special significance in os.lchown
923 def _procuid(self, uid):
924 if uid is None or uid == "-":
925 return -1
926 elif uid.isdigit():
927 return int(uid)
928 else:
929 return pwd.getpwnam(uid).pw_uid
930
931 def _procgid(self, gid):
932 if gid is None or gid == "-":
933 return -1
934 elif gid.isdigit():
935 return int(gid)
936 else:
937 return grp.getgrnam(gid).gr_gid
938
939 # Use for debugging the entries
940 def __str__(self):
941 if self.link:
942 return "%s link %s" % (self.path, self.link)
943 else:
944 mode = "-"
945 if self.mode:
946 mode = "0%o" % self.mode
947 fmode = "-"
948 if self.fmode:
949 fmode = "0%o" % self.fmode
950 uid = self._mapugid(self.uid)
951 gid = self._mapugid(self.gid)
952 fuid = self._mapugid(self.fuid)
953 fgid = self._mapugid(self.fgid)
954 return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
955
956 def _mapugid(self, id):
957 if id is None or id == -1:
958 return "-"
959 else:
960 return "%d" % id
961
962 # Fix the permission, owner and group of path
963 def fix_perms(path, mode, uid, gid, dir):
964 if mode and not os.path.islink(path):
965 #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
966 os.chmod(path, mode)
967 # -1 is a special value that means don't change the uid/gid
968 # if they are BOTH -1, don't bother to lchown
969 if not (uid == -1 and gid == -1):
970 #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
971 os.lchown(path, uid, gid)
972
973 # Return a list of configuration files based on either the default
974 # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
975 # paths are resolved via BBPATH
976 def get_fs_perms_list(d):
977 paths = ""
978 bbpath = d.getVar('BBPATH')
979 fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
980 for conf_file in fs_perms_tables.split():
981 confpath = bb.utils.which(bbpath, conf_file)
982 if confpath:
983 paths += " %s" % confpath
984 else:
985 bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
986 return paths
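# FILESYSTEM_PERMS_TABLES is a space-separated list of table files resolved
# via BBPATH; a typical (illustrative) setting would be:
#   FILESYSTEM_PERMS_TABLES = "files/fs-perms.txt files/fs-perms-persistent-log.txt"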
987
988
989
990 dvar = d.getVar('PKGD')
991
992 fs_perms_table = {}
993 fs_link_table = {}
994
995 # By default all of the standard directories specified in
996 # bitbake.conf will get 0755 root:root.
997 target_path_vars = [ 'base_prefix',
998 'prefix',
999 'exec_prefix',
1000 'base_bindir',
1001 'base_sbindir',
1002 'base_libdir',
1003 'datadir',
1004 'sysconfdir',
1005 'servicedir',
1006 'sharedstatedir',
1007 'localstatedir',
1008 'infodir',
1009 'mandir',
1010 'docdir',
1011 'bindir',
1012 'sbindir',
1013 'libexecdir',
1014 'libdir',
1015 'includedir',
1016 'oldincludedir' ]
1017
1018 for path in target_path_vars:
1019 dir = d.getVar(path) or ""
1020 if dir == "":
1021 continue
1022 fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
1023
1024 # Now we actually load from the configuration files
1025 for conf in get_fs_perms_list(d).split():
1026 if not os.path.exists(conf):
1027 continue
1028 with open(conf) as f:
1029 for line in f:
1030 if line.startswith('#'):
1031 continue
1032 lsplit = line.split()
1033 if len(lsplit) == 0:
1034 continue
1035 if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
1036 msg = "Fixup perms: %s invalid line: %s" % (conf, line)
1037 oe.qa.handle_error("perm-line", msg, d)
1038 continue
1039 entry = fs_perms_entry(d.expand(line))
1040 if entry and entry.path:
1041 if entry.link:
1042 fs_link_table[entry.path] = entry
1043 if entry.path in fs_perms_table:
1044 fs_perms_table.pop(entry.path)
1045 else:
1046 fs_perms_table[entry.path] = entry
1047 if entry.path in fs_link_table:
1048 fs_link_table.pop(entry.path)
1049
1050 # Debug -- list out in-memory table
1051 #for dir in fs_perms_table:
1052 # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
1053 #for link in fs_link_table:
1054 # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
1055
1056 # We process links first, so we can go back and fixup directory ownership
1057 # for any newly created directories
1058 # Process in sorted order so /run gets created before /run/lock, etc.
1059 for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
1060 link = entry.link
1061 dir = entry.path
1062 origin = dvar + dir
1063 if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
1064 continue
1065
1066 if link[0] == "/":
1067 target = dvar + link
1068 ptarget = link
1069 else:
1070 target = os.path.join(os.path.dirname(origin), link)
1071 ptarget = os.path.join(os.path.dirname(dir), link)
1072 if os.path.exists(target):
1073 msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
1074 oe.qa.handle_error("perm-link", msg, d)
1075 continue
1076
1077 # Create path to move directory to, move it, and then setup the symlink
1078 bb.utils.mkdirhier(os.path.dirname(target))
1079 #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
1080 bb.utils.rename(origin, target)
1081 #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
1082 os.symlink(link, origin)
1083
1084 for dir in fs_perms_table:
1085 origin = dvar + dir
1086 if not (cpath.exists(origin) and cpath.isdir(origin)):
1087 continue
1088
1089 fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
1090
1091 if fs_perms_table[dir].walk == 'true':
1092 for root, dirs, files in os.walk(origin):
1093 for dr in dirs:
1094 each_dir = os.path.join(root, dr)
1095 fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
1096 for f in files:
1097 each_file = os.path.join(root, f)
1098 fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
1099}
1100
1101def package_debug_vars(d):
1102 # We default to '.debug' style
1103 if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
1104 # Single debug-file-directory style debug info
1105 debug_vars = {
1106 "append": ".debug",
1107 "staticappend": "",
1108 "dir": "",
1109 "staticdir": "",
1110 "libdir": "/usr/lib/debug",
1111 "staticlibdir": "/usr/lib/debug-static",
1112 "srcdir": "/usr/src/debug",
1113 }
1114 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
1115 # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
1116 debug_vars = {
1117 "append": "",
1118 "staticappend": "",
1119 "dir": "/.debug",
1120 "staticdir": "/.debug-static",
1121 "libdir": "",
1122 "staticlibdir": "",
1123 "srcdir": "",
1124 }
1125 elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
1126 debug_vars = {
1127 "append": "",
1128 "staticappend": "",
1129 "dir": "/.debug",
1130 "staticdir": "/.debug-static",
1131 "libdir": "",
1132 "staticlibdir": "",
1133 "srcdir": "/usr/src/debug",
1134 }
1135 else:
1136 # Original OE-core, a.k.a. ".debug", style debug info
1137 debug_vars = {
1138 "append": "",
1139 "staticappend": "",
1140 "dir": "/.debug",
1141 "staticdir": "/.debug-static",
1142 "libdir": "",
1143 "staticlibdir": "",
1144 "srcdir": "/usr/src/debug",
1145 }
1146
1147 return debug_vars
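# As a worked example (paths illustrative), for a binary /usr/bin/foo the
# split debug info ends up at:
#   default ".debug" style: /usr/bin/.debug/foo
#   debug-file-directory:   /usr/lib/debug/usr/bin/foo.debug
#   debug-with-srcpkg:      /usr/bin/.debug/foo, with sources in /usr/src/debug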
1148
1149python split_and_strip_files () {
1150 import stat, errno
1151 import subprocess
1152
1153 dvar = d.getVar('PKGD')
1154 pn = d.getVar('PN')
1155 hostos = d.getVar('HOST_OS')
1156
1157 oldcwd = os.getcwd()
1158 os.chdir(dvar)
1159
1160 dv = package_debug_vars(d)
1161
1162 #
1163 # First let's figure out all of the files we may have to process ... do this only once!
1164 #
1165 elffiles = {}
1166 symlinks = {}
1167 staticlibs = []
1168 inodes = {}
1169 libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
1170 baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
1171 skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
1172 if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
1173 d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
1174 checkelf = {}
1175 checkelflinks = {}
1176 for root, dirs, files in cpath.walk(dvar):
1177 for f in files:
1178 file = os.path.join(root, f)
1179
1180 # Skip debug files
1181 if dv["append"] and file.endswith(dv["append"]):
1182 continue
1183 if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
1184 continue
1185
1186 if file in skipfiles:
1187 continue
1188
1189 if oe.package.is_static_lib(file):
1190 staticlibs.append(file)
1191 continue
1192
1193 try:
1194 ltarget = cpath.realpath(file, dvar, False)
1195 s = cpath.lstat(ltarget)
1196 except OSError as e:
1197 (err, strerror) = e.args
1198 if err != errno.ENOENT:
1199 raise
1200 # Skip broken symlinks
1201 continue
1202 if not s:
1203 continue
1204 # Check it's an executable
1205 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
1206 or (s[stat.ST_MODE] & stat.S_IXOTH) \
1207 or ((file.startswith(libdir) or file.startswith(baselibdir)) \
1208 and (".so" in f or ".node" in f)) \
1209 or (f.startswith('vmlinux') or ".ko" in f):
1210
1211 if cpath.islink(file):
1212 checkelflinks[file] = ltarget
1213 continue
1214 # Use a reference of device ID and inode number to identify files
1215 file_reference = "%d_%d" % (s.st_dev, s.st_ino)
1216 checkelf[file] = (file, file_reference)
1217
1218 results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
1219 results_map = {}
1220 for (ltarget, elf_file) in results:
1221 results_map[ltarget] = elf_file
1222 for file in checkelflinks:
1223 ltarget = checkelflinks[file]
1224 # If it's a symlink, and points to an ELF file, we capture the readlink target
1225 if results_map[ltarget]:
1226 target = os.readlink(file)
1227 #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
1228 symlinks[file] = target
1229
1230 results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
1231
1232 # Sort results by file path. This ensures that the files are always
1233 # processed in the same order, which is important to make sure builds
1234 # are reproducible when dealing with hardlinks
1235 results.sort(key=lambda x: x[0])
1236
1237 for (file, elf_file) in results:
1238 # It's a file (or hardlink), not a link
1239 # ...but is it ELF, and is it already stripped?
1240 if elf_file & 1:
1241 if elf_file & 2:
1242 if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
1243 bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
1244 else:
1245 msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
1246 oe.qa.handle_error("already-stripped", msg, d)
1247 continue
1248
1249 # At this point we have an unstripped ELF file. We need to:
1250 # a) Make sure any file we strip is not hardlinked to anything else outside this tree
1251 # b) Only strip any hardlinked file once (no races)
1252 # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
1253
1254 # Use a reference of device ID and inode number to identify files
1255 file_reference = checkelf[file][1]
1256 if file_reference in inodes:
1257 os.unlink(file)
1258 os.link(inodes[file_reference][0], file)
1259 inodes[file_reference].append(file)
1260 else:
1261 inodes[file_reference] = [file]
1262 # break hardlink
1263 bb.utils.break_hardlinks(file)
1264 elffiles[file] = elf_file
1265 # We modified the file, so clear the cache
1266 cpath.updatecache(file)
1267
1268 def strip_pkgd_prefix(f):
1269 nonlocal dvar
1270
1271 if f.startswith(dvar):
1272 return f[len(dvar):]
1273
1274 return f
1275
1276 #
1277 # First let's process debug splitting
1278 #
1279 if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
1280 results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
1281
1282 if dv["srcdir"] and not hostos.startswith("mingw"):
1283 if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
1284 results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
1285 else:
1286 for file in staticlibs:
1287 results.append((file, source_info(file, d)))
1288
1289 d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
1290
1291 sources = set()
1292 for r in results:
1293 sources.update(r[1])
1294
1295 # Hardlink our debug symbols to the other hardlink copies
1296 for ref in inodes:
1297 if len(inodes[ref]) == 1:
1298 continue
1299
1300 target = inodes[ref][0][len(dvar):]
1301 for file in inodes[ref][1:]:
1302 src = file[len(dvar):]
1303 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
1304 fpath = dvar + dest
1305 ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
1306 bb.utils.mkdirhier(os.path.dirname(fpath))
1307 # Only create one hardlink to the separated debug info file in each directory
1308 if not os.access(fpath, os.R_OK):
1309 #bb.note("Link %s -> %s" % (fpath, ftarget))
1310 os.link(ftarget, fpath)
1311
1312 # Create symlinks for all cases we were able to split symbols
1313 for file in symlinks:
1314 src = file[len(dvar):]
1315 dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
1316 fpath = dvar + dest
1317 # Skip it if the target doesn't exist
1318 try:
1319 s = os.stat(fpath)
1320 except OSError as e:
1321 (err, strerror) = e.args
1322 if err != errno.ENOENT:
1323 raise
1324 continue
1325
1326 ltarget = symlinks[file]
1327 lpath = os.path.dirname(ltarget)
1328 lbase = os.path.basename(ltarget)
1329 ftarget = ""
1330 if lpath and lpath != ".":
1331 ftarget += lpath + dv["dir"] + "/"
1332 ftarget += lbase + dv["append"]
1333 if lpath.startswith(".."):
1334 ftarget = os.path.join("..", ftarget)
1335 bb.utils.mkdirhier(os.path.dirname(fpath))
1336 #bb.note("Symlink %s -> %s" % (fpath, ftarget))
1337 os.symlink(ftarget, fpath)
1338
1339 # Process the dv["srcdir"] if requested...
1340 # This copies and places the referenced sources for later debugging...
1341 copydebugsources(dv["srcdir"], sources, d)
1342 #
1343 # End of debug splitting
1344 #
1345
1346 #
1347 # Now let's go back over things and strip them
1348 #
1349 if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
1350 strip = d.getVar("STRIP")
1351 sfiles = []
1352 for file in elffiles:
1353 elf_file = int(elffiles[file])
1354 #bb.note("Strip %s" % file)
1355 sfiles.append((file, elf_file, strip))
1356 if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
1357 for f in staticlibs:
1358 sfiles.append((f, 16, strip))
1359
1360 oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
1361
1362 # Build "minidebuginfo" and reinject it back into the stripped binaries
1363 if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
1364 oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
1365 extraargs=(dvar, dv, d))
1366
1367 #
1368 # End of strip
1369 #
1370 os.chdir(oldcwd)
1371}
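# To opt a recipe out of the stripping/splitting above, the variables the
# function checks can be set directly; a minimal sketch:
#   INHIBIT_PACKAGE_STRIP = "1"
#   INHIBIT_PACKAGE_DEBUG_SPLIT = "1"
# or, for individual files (illustrative path):
#   INHIBIT_PACKAGE_STRIP_FILES = "${PKGD}${bindir}/foo"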
1372
1373python populate_packages () {
1374 import glob, re
1375
1376 workdir = d.getVar('WORKDIR')
1377 outdir = d.getVar('DEPLOY_DIR')
1378 dvar = d.getVar('PKGD')
1379 packages = d.getVar('PACKAGES').split()
1380 pn = d.getVar('PN')
1381
1382 bb.utils.mkdirhier(outdir)
1383 os.chdir(dvar)
1384
1385 autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
1386
1387 split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
1388
1389 # If debug-with-srcpkg mode is enabled then add the source package if it
1390 # doesn't exist and add the source file contents to the source package.
1391 if split_source_package:
1392 src_package_name = ('%s-src' % d.getVar('PN'))
1393 if src_package_name not in packages:
1394 packages.append(src_package_name)
1395 d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
1396
1397 # Sanity check PACKAGES for duplicates
1398 # Sanity should be moved to sanity.bbclass once we have the infrastructure
1399 package_dict = {}
1400
1401 for i, pkg in enumerate(packages):
1402 if pkg in package_dict:
1403 msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
1404 oe.qa.handle_error("packages-list", msg, d)
1405 # Ensure the source package gets the chance to pick up the source files
1406 # before the debug package by ordering it first in PACKAGES. Whether it
1407 # actually picks up any source files is controlled by
1408 # PACKAGE_DEBUG_SPLIT_STYLE.
1409 elif pkg.endswith("-src"):
1410 package_dict[pkg] = (10, i)
1411 elif autodebug and pkg.endswith("-dbg"):
1412 package_dict[pkg] = (30, i)
1413 else:
1414 package_dict[pkg] = (50, i)
1415 packages = sorted(package_dict.keys(), key=package_dict.get)
1416 d.setVar('PACKAGES', ' '.join(packages))
1417 pkgdest = d.getVar('PKGDEST')
1418
1419 seen = []
1420
1421 # os.mkdir masks the permissions with umask so we have to unset it first
1422 oldumask = os.umask(0)
1423
1424 debug = []
1425 for root, dirs, files in cpath.walk(dvar):
1426 dir = root[len(dvar):]
1427 if not dir:
1428 dir = os.sep
1429 for f in (files + dirs):
1430 path = "." + os.path.join(dir, f)
1431 if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
1432 debug.append(path)
1433
1434 for pkg in packages:
1435 root = os.path.join(pkgdest, pkg)
1436 bb.utils.mkdirhier(root)
1437
1438 filesvar = d.getVar('FILES:%s' % pkg) or ""
1439 if "//" in filesvar:
1440 msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
1441 oe.qa.handle_error("files-invalid", msg, d)
1442 filesvar = filesvar.replace("//", "/")
1443
1444 origfiles = filesvar.split()
1445 files, symlink_paths = files_from_filevars(origfiles)
1446
1447 if autodebug and pkg.endswith("-dbg"):
1448 files.extend(debug)
1449
1450 for file in files:
1451 if (not cpath.islink(file)) and (not cpath.exists(file)):
1452 continue
1453 if file in seen:
1454 continue
1455 seen.append(file)
1456
1457 def mkdir(src, dest, p):
1458 src = os.path.join(src, p)
1459 dest = os.path.join(dest, p)
1460 fstat = cpath.stat(src)
1461 os.mkdir(dest)
1462 os.chmod(dest, fstat.st_mode)
1463 os.chown(dest, fstat.st_uid, fstat.st_gid)
1464 if p not in seen:
1465 seen.append(p)
1466 cpath.updatecache(dest)
1467
1468 def mkdir_recurse(src, dest, paths):
1469 if cpath.exists(dest + '/' + paths):
1470 return
1471 while paths.startswith("./"):
1472 paths = paths[2:]
1473 p = "."
1474 for c in paths.split("/"):
1475 p = os.path.join(p, c)
1476 if not cpath.exists(os.path.join(dest, p)):
1477 mkdir(src, dest, p)
1478
1479 if cpath.isdir(file) and not cpath.islink(file):
1480 mkdir_recurse(dvar, root, file)
1481 continue
1482
1483 mkdir_recurse(dvar, root, os.path.dirname(file))
1484 fpath = os.path.join(root,file)
1485 if not cpath.islink(file):
1486 os.link(file, fpath)
1487 continue
1488 ret = bb.utils.copyfile(file, fpath)
1489 if ret is False or ret == 0:
1490 bb.fatal("File population failed")
1491
1492 # Check if symlink paths exist
1493 for file in symlink_paths:
1494 if not os.path.exists(os.path.join(root,file)):
1495 bb.fatal("File '%s' cannot be packaged into '%s' because its "
1496 "parent directory structure does not exist. One of "
1497 "its parent directories is a symlink whose target "
1498 "directory is not included in the package." %
1499 (file, pkg))
1500
1501 os.umask(oldumask)
1502 os.chdir(workdir)
1503
1504 # Handle excluding packages with incompatible licenses
1505 package_list = []
1506 for pkg in packages:
1507 licenses = d.getVar('_exclude_incompatible-' + pkg)
1508 if licenses:
1509 msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
1510 oe.qa.handle_error("incompatible-license", msg, d)
1511 else:
1512 package_list.append(pkg)
1513 d.setVar('PACKAGES', ' '.join(package_list))
1514
1515 unshipped = []
1516 for root, dirs, files in cpath.walk(dvar):
1517 dir = root[len(dvar):]
1518 if not dir:
1519 dir = os.sep
1520 for f in (files + dirs):
1521 path = os.path.join(dir, f)
1522 if ('.' + path) not in seen:
1523 unshipped.append(path)
1524
1525 if unshipped:
1526 msg = pn + ": Files/directories were installed but not shipped in any package:"
1527 if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
1528 bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
1529 else:
1530 for f in unshipped:
1531 msg = msg + "\n " + f
1532 msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
1533 msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
1534 oe.qa.handle_error("installed-vs-shipped", msg, d)
1535}
1536populate_packages[dirs] = "${D}"
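# populate_packages() matches PKGD contents against each package's FILES
# globs, so recipes steer the split with entries such as (illustrative):
#   FILES:${PN}-extra = "${bindir}/foo-extra ${datadir}/foo/*.cfg"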
1537
1538python package_fixsymlinks () {
1539 import errno
1540 pkgdest = d.getVar('PKGDEST')
1541 packages = d.getVar("PACKAGES", False).split()
1542
1543 dangling_links = {}
1544 pkg_files = {}
1545 for pkg in packages:
1546 dangling_links[pkg] = []
1547 pkg_files[pkg] = []
1548 inst_root = os.path.join(pkgdest, pkg)
1549 for path in pkgfiles[pkg]:
1550 rpath = path[len(inst_root):]
1551 pkg_files[pkg].append(rpath)
1552 rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
1553 if not cpath.lexists(rtarget):
1554 dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
1555
1556 newrdepends = {}
1557 for pkg in dangling_links:
1558 for l in dangling_links[pkg]:
1559 found = False
1560 bb.debug(1, "%s contains dangling link %s" % (pkg, l))
1561 for p in packages:
1562 if l in pkg_files[p]:
1563 found = True
1564 bb.debug(1, "target found in %s" % p)
1565 if p == pkg:
1566 break
1567 if pkg not in newrdepends:
1568 newrdepends[pkg] = []
1569 newrdepends[pkg].append(p)
1570 break
1571 if not found:
1572 bb.note("%s contains dangling symlink to %s" % (pkg, l))
1573
1574 for pkg in newrdepends:
1575 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
1576 for p in newrdepends[pkg]:
1577 if p not in rdepends:
1578 rdepends[p] = []
1579 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
1580}
1581
1582
1583python package_package_name_hook() {
1584 """
1585 A package_name_hook function can be used to rewrite the package names by
1586 changing PKG. For an example, see debian.bbclass.
1587 """
1588 pass
1589}
1590
1591EXPORT_FUNCTIONS package_name_hook
1592
1593
1594PKGDESTWORK = "${WORKDIR}/pkgdata"
1595
1596PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
1597
1598python emit_pkgdata() {
1599 from glob import glob
1600 import json
1601 import bb.compress.zstd
1602
1603 def process_postinst_on_target(pkg, mlprefix):
1604 pkgval = d.getVar('PKG:%s' % pkg)
1605 if pkgval is None:
1606 pkgval = pkg
1607
1608 defer_fragment = """
1609if [ -n "$D" ]; then
1610 $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
1611 exit 0
1612fi
1613""" % (pkgval, mlprefix)
1614
1615 postinst = d.getVar('pkg_postinst:%s' % pkg)
1616 postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
1617
1618 if postinst_ontarget:
1619 bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
1620 if not postinst:
1621 postinst = '#!/bin/sh\n'
1622 postinst += defer_fragment
1623 postinst += postinst_ontarget
1624 d.setVar('pkg_postinst:%s' % pkg, postinst)
1625
1626 def add_set_e_to_scriptlets(pkg):
1627 for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
1628 scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
1629 if scriptlet:
1630 scriptlet_split = scriptlet.split('\n')
1631 if scriptlet_split[0].startswith("#!"):
1632 scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
1633 else:
1634 scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
1635 d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
1636
1637 def write_if_exists(f, pkg, var):
1638 def encode(s):
1639 import codecs
1640 c = codecs.getencoder("unicode_escape")
1641 return c(s)[0].decode("latin1")
1642
1643 val = d.getVar('%s:%s' % (var, pkg))
1644 if val:
1645 f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
1646 return val
1647 val = d.getVar('%s' % (var))
1648 if val:
1649 f.write('%s: %s\n' % (var, encode(val)))
1650 return val
1651
1652 def write_extra_pkgs(variants, pn, packages, pkgdatadir):
1653 for variant in variants:
1654 with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
1655 fd.write("PACKAGES: %s\n" % ' '.join(
1656 map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
1657
1658 def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
1659 for variant in variants:
1660 for pkg in packages.split():
1661 ml_pkg = "%s-%s" % (variant, pkg)
1662 subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
1663 with open(subdata_file, 'w') as fd:
1664 fd.write("PKG:%s: %s" % (ml_pkg, pkg))
1665
1666 packages = d.getVar('PACKAGES')
1667 pkgdest = d.getVar('PKGDEST')
1668 pkgdatadir = d.getVar('PKGDESTWORK')
1669
1670 data_file = pkgdatadir + d.expand("/${PN}")
1671 with open(data_file, 'w') as fd:
1672 fd.write("PACKAGES: %s\n" % packages)
1673
1674 pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
1675
1676 pn = d.getVar('PN')
1677 global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
1678 variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
1679
1680 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1681 write_extra_pkgs(variants, pn, packages, pkgdatadir)
1682
1683 if bb.data.inherits_class('allarch', d) and not variants \
1684 and not bb.data.inherits_class('packagegroup', d):
1685 write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
1686
1687 workdir = d.getVar('WORKDIR')
1688
1689 for pkg in packages.split():
1690 pkgval = d.getVar('PKG:%s' % pkg)
1691 if pkgval is None:
1692 pkgval = pkg
1693 d.setVar('PKG:%s' % pkg, pkg)
1694
1695 extended_data = {
1696 "files_info": {}
1697 }
1698
1699 pkgdestpkg = os.path.join(pkgdest, pkg)
1700 files = {}
1701 files_extra = {}
1702 total_size = 0
1703 seen = set()
1704 for f in pkgfiles[pkg]:
1705 fpath = os.sep + os.path.relpath(f, pkgdestpkg)
1706
1707 fstat = os.lstat(f)
1708 files[fpath] = fstat.st_size
1709
1710 extended_data["files_info"].setdefault(fpath, {})
1711 extended_data["files_info"][fpath]['size'] = fstat.st_size
1712
1713 if fstat.st_ino not in seen:
1714 seen.add(fstat.st_ino)
1715 total_size += fstat.st_size
1716
1717 if fpath in pkgdebugsource:
1718 extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
1719 del pkgdebugsource[fpath]
1720
1721 d.setVar('FILES_INFO:' + pkg, json.dumps(files, sort_keys=True))
1722
1723 process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
1724 add_set_e_to_scriptlets(pkg)
1725
1726 subdata_file = pkgdatadir + "/runtime/%s" % pkg
1727 with open(subdata_file, 'w') as sf:
1728 for var in (d.getVar('PKGDATA_VARS') or "").split():
1729 val = write_if_exists(sf, pkg, var)
1730
1731 write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
1732 for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
1733 write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
1734
1735 write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
1736 for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
1737 write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
1738
1739 sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
1740
1741 subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
1742 num_threads = int(d.getVar("BB_NUMBER_THREADS"))
1743 with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
1744 json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
1745
1746 # Symlinks needed for rprovides lookup
1747 rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
1748 if rprov:
1749 for p in bb.utils.explode_deps(rprov):
1750 subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
1751 bb.utils.mkdirhier(os.path.dirname(subdata_sym))
1752 oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
1753
1754 allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
1755 if not allow_empty:
1756 allow_empty = d.getVar('ALLOW_EMPTY')
1757 root = "%s/%s" % (pkgdest, pkg)
1758 os.chdir(root)
1759 g = glob('*')
1760 if g or allow_empty == "1":
1761 # Symlinks needed for reverse lookups (from the final package name)
1762 subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
1763 oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
1764
1765 packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
1766 open(packagedfile, 'w').close()
1767
1768 if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
1769 write_extra_runtime_pkgs(variants, packages, pkgdatadir)
1770
1771 if bb.data.inherits_class('allarch', d) and not variants \
1772 and not bb.data.inherits_class('packagegroup', d):
1773 write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
1774
1775}
1776emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
1777emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
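# Each pkgdata/runtime/<pkg> file written above holds one "VAR:pkg: value"
# line per PKGDATA_VARS entry, e.g. (values illustrative):
#   PKG:foo: foo
#   RDEPENDS:foo: libc6 (>= 2.35)
#   PKGSIZE:foo: 12345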
1778
1779ldconfig_postinst_fragment() {
1780if [ x"$D" = "x" ]; then
1781 if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
1782fi
1783}
1784
1785RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
1786
1787# Collect per-file run-time dependency metadata
1788# Output:
1789# FILERPROVIDESFLIST:pkg - list of all files w/ deps
1790# FILERPROVIDES:filepath:pkg - per file dep
1791#
1792# FILERDEPENDSFLIST:pkg - list of all files w/ deps
1793# FILERDEPENDS:filepath:pkg - per file dep
1794
1795python package_do_filedeps() {
1796 if d.getVar('SKIP_FILEDEPS') == '1':
1797 return
1798
1799 pkgdest = d.getVar('PKGDEST')
1800 packages = d.getVar('PACKAGES')
1801 rpmdeps = d.getVar('RPMDEPS')
1802
1803 def chunks(files, n):
1804 return [files[i:i+n] for i in range(0, len(files), n)]
1805
1806 pkglist = []
1807 for pkg in packages.split():
1808 if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
1809 continue
1810 if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
1811 continue
1812 for files in chunks(pkgfiles[pkg], 100):
1813 pkglist.append((pkg, files, rpmdeps, pkgdest))
1814
1815 processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
1816
1817 provides_files = {}
1818 requires_files = {}
1819
1820 for result in processed:
1821 (pkg, provides, requires) = result
1822
1823 if pkg not in provides_files:
1824 provides_files[pkg] = []
1825 if pkg not in requires_files:
1826 requires_files[pkg] = []
1827
1828 for file in sorted(provides):
1829 provides_files[pkg].append(file)
1830 key = "FILERPROVIDES:" + file + ":" + pkg
1831 d.appendVar(key, " " + " ".join(provides[file]))
1832
1833 for file in sorted(requires):
1834 requires_files[pkg].append(file)
1835 key = "FILERDEPENDS:" + file + ":" + pkg
1836 d.appendVar(key, " " + " ".join(requires[file]))
1837
1838 for pkg in requires_files:
1839 d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
1840 for pkg in provides_files:
1841 d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
1842}
1843
1844SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
1845SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
1846
1847python package_do_shlibs() {
1848 import itertools
1849 import re, shlex
1850 import subprocess
1851
1852 exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
1853 if exclude_shlibs:
1854 bb.note("not generating shlibs")
1855 return
1856
1857 lib_re = re.compile(r"^.*\.so")
1858 libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
1859
1860 packages = d.getVar('PACKAGES')
1861
1862 shlib_pkgs = []
1863 exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
1864 if exclusion_list:
1865 for pkg in packages.split():
1866 if pkg not in exclusion_list.split():
1867 shlib_pkgs.append(pkg)
1868 else:
1869 bb.note("not generating shlibs for %s" % pkg)
1870 else:
1871 shlib_pkgs = packages.split()
1872
1873 hostos = d.getVar('HOST_OS')
1874
1875 workdir = d.getVar('WORKDIR')
1876
1877 ver = d.getVar('PKGV')
1878 if not ver:
1879 msg = "PKGV not defined"
1880 oe.qa.handle_error("pkgv-undefined", msg, d)
1881 return
1882
1883 pkgdest = d.getVar('PKGDEST')
1884
1885 shlibswork_dir = d.getVar('SHLIBSWORKDIR')
1886
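# linux_so() below scrapes the dynamic section printed by objdump -p; the
# lines it matches look like (illustrative):
#   NEEDED   libc.so.6
#   SONAME   libfoo.so.1
#   RPATH    $ORIGIN/../lib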
1887 def linux_so(file, pkg, pkgver, d):
1888 needs_ldconfig = False
1889 needed = set()
1890 sonames = set()
1891 renames = []
1892 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1893 cmd = d.getVar('OBJDUMP') + " -p " + shlex.quote(file) + " 2>/dev/null"
1894 fd = os.popen(cmd)
1895 lines = fd.readlines()
1896 fd.close()
1897 rpath = tuple()
1898 for l in lines:
1899 m = re.match(r"\s+RPATH\s+([^\s]*)", l)
1900 if m:
1901 rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
1902 rpath = tuple(map(os.path.normpath, rpaths))
1903 for l in lines:
1904 m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
1905 if m:
1906 dep = m.group(1)
1907 # needed is a set of tuples, so duplicate entries are filtered on add
1908 needed.add((dep, file, rpath))
1909 m = re.match(r"\s+SONAME\s+([^\s]*)", l)
1910 if m:
1911 this_soname = m.group(1)
1912 prov = (this_soname, ldir, pkgver)
1913 if prov not in sonames:
1914 # if the library is private (only used by this package) then do not register it as a shlib provider
1915 import fnmatch
1916 if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
1917 sonames.add(prov)
1918 if libdir_re.match(os.path.dirname(file)):
1919 needs_ldconfig = True
1920 if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
1921 renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
1922 return (needs_ldconfig, needed, sonames, renames)
1923
1924 def darwin_so(file, needed, sonames, renames, pkgver):
1925 if not os.path.exists(file):
1926 return
1927 ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
1928
1929 def get_combinations(base):
1930 #
1931 # Given a base library name, find all combinations of this split by "." and "-"
1932 #
1933 combos = []
1934 options = base.split(".")
1935 for i in range(1, len(options) + 1):
1936 combos.append(".".join(options[0:i]))
1937 options = base.split("-")
1938 for i in range(1, len(options) + 1):
1939 combos.append("-".join(options[0:i]))
1940 return combos
1941
1942 if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
1943 # Drop suffix
1944 name = os.path.basename(file).rsplit(".",1)[0]
1945 # Find all combinations
1946 combos = get_combinations(name)
1947 for combo in combos:
1948 if combo not in sonames:
1949 prov = (combo, ldir, pkgver)
1950 sonames.add(prov)
1951 if file.endswith('.dylib') or file.endswith('.so'):
1952 rpath = []
1953 p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1954 out, err = p.communicate()
1955 # If returned successfully, process stdout for results
1956 if p.returncode == 0:
1957 for l in out.decode("utf-8").split("\n"):
1958 l = l.strip()
1959 if l.startswith('path '):
1960 rpath.append(l.split()[1])
1961
1962 p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1963 out, err = p.communicate()
1964 # If returned successfully, process stdout for results
1965 if p.returncode == 0:
1966 for l in out.decode("utf-8").split("\n"):
1967 l = l.strip()
1968 if not l or l.endswith(":"):
1969 continue
1970 if "is not an object file" in l:
1971 continue
1972 name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
1973 if name and name not in needed[pkg]:
1974 needed[pkg].add((name, file, tuple()))
1975
1976 def mingw_dll(file, needed, sonames, renames, pkgver):
1977 if not os.path.exists(file):
1978 return
1979
1980 if file.endswith(".dll"):
1981 # assume all dlls are shared objects provided by the package
1982 sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
1983
1984 if (file.endswith(".dll") or file.endswith(".exe")):
1985 # use objdump to search for "DLL Name: .*\.dll"
1986 p = subprocess.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1987 out, err = p.communicate()
1988 # process the output, grabbing all .dll names
1989 if p.returncode == 0:
1990 for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
1991 dllname = m.group(1)
1992 if dllname:
1993 needed[pkg].add((dllname, file, tuple()))
1994
1995 if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
1996 snap_symlinks = True
1997 else:
1998 snap_symlinks = False
1999
2000 needed = {}
2001
2002 shlib_provider = oe.package.read_shlib_providers(d)
2003
2004 for pkg in shlib_pkgs:
2005 private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
2006 private_libs = private_libs.split()
2007 needs_ldconfig = False
2008 bb.debug(2, "calculating shlib provides for %s" % pkg)
2009
2010 pkgver = d.getVar('PKGV:' + pkg)
2011 if not pkgver:
2012 pkgver = d.getVar('PV:' + pkg)
2013 if not pkgver:
2014 pkgver = ver
2015
2016 needed[pkg] = set()
2017 sonames = set()
2018 renames = []
2019 linuxlist = []
2020 for file in pkgfiles[pkg]:
2021 soname = None
2022 if cpath.islink(file):
2023 continue
2024 if hostos == "darwin" or hostos == "darwin8":
2025 darwin_so(file, needed, sonames, renames, pkgver)
2026 elif hostos.startswith("mingw"):
2027 mingw_dll(file, needed, sonames, renames, pkgver)
2028 elif os.access(file, os.X_OK) or lib_re.match(file):
2029 linuxlist.append(file)
2030
2031 if linuxlist:
2032 results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
2033 for r in results:
2034 ldconfig = r[0]
2035 needed[pkg] |= r[1]
2036 sonames |= r[2]
2037 renames.extend(r[3])
2038 needs_ldconfig = needs_ldconfig or ldconfig
2039
2040 for (old, new) in renames:
2041 bb.note("Renaming %s to %s" % (old, new))
2042 bb.utils.rename(old, new)
2043 pkgfiles[pkg].remove(old)
2044
2045 shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
2046 if len(sonames):
2047 with open(shlibs_file, 'w') as fd:
2048 for s in sorted(sonames):
2049 if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
2050 (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
2051 if old_pkg != pkg:
2052 bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
2053 bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
2054 fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
2055 if s[0] not in shlib_provider:
2056 shlib_provider[s[0]] = {}
2057 shlib_provider[s[0]][s[1]] = (pkg, pkgver)
2058 if needs_ldconfig:
2059 bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
2060 postinst = d.getVar('pkg_postinst:%s' % pkg)
2061 if not postinst:
2062 postinst = '#!/bin/sh\n'
2063 postinst += d.getVar('ldconfig_postinst_fragment')
2064 d.setVar('pkg_postinst:%s' % pkg, postinst)
2065 bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
2066
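# ASSUME_SHLIBS entries take the form <soname>:<dep_pkg>, optionally with a
# _<version> suffix, to register a provider from outside the build; a sketch:
#   ASSUME_SHLIBS = "libEGL.so.1:libegl-implementation_1.0"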
2067 assumed_libs = d.getVar('ASSUME_SHLIBS')
2068 if assumed_libs:
2069 libdir = d.getVar("libdir")
2070 for e in assumed_libs.split():
2071 l, dep_pkg = e.split(":")
2072 lib_ver = None
2073 dep_pkg = dep_pkg.rsplit("_", 1)
2074 if len(dep_pkg) == 2:
2075 lib_ver = dep_pkg[1]
2076 dep_pkg = dep_pkg[0]
2077 if l not in shlib_provider:
2078 shlib_provider[l] = {}
2079 shlib_provider[l][libdir] = (dep_pkg, lib_ver)
2080
2081 libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
2082
2083 for pkg in shlib_pkgs:
2084 bb.debug(2, "calculating shlib requirements for %s" % pkg)
2085
2086 private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
2087 private_libs = private_libs.split()
2088
2089 deps = list()
2090 for n in needed[pkg]:
2091 # If n is in the private libraries, don't search for a provider for it.
2092 # This could cause a problem if, say, some abc.bb provides a private
2093 # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on the system library libfoo.so.1,
2094 # but skipping it is still a better alternative than providing our own
2095 # version and then adding a runtime dependency on the same system library.
2096 import fnmatch
2097 if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
2098 bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
2099 continue
2100 if n[0] in shlib_provider.keys():
2101 shlib_provider_map = shlib_provider[n[0]]
2102 matches = set()
2103 for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
2104 if p in shlib_provider_map:
2105 matches.add(p)
2106 if len(matches) > 1:
2107 matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
2108 bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
2109 elif len(matches) == 1:
2110 (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
2111
2112 bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
2113
2114 if dep_pkg == pkg:
2115 continue
2116
2117 if ver_needed:
2118 dep = "%s (>= %s)" % (dep_pkg, ver_needed)
2119 else:
2120 dep = dep_pkg
2121 if dep not in deps:
2122 deps.append(dep)
2123 continue
2124 bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
2125
2126 deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
2127 if os.path.exists(deps_file):
2128 os.remove(deps_file)
2129 if deps:
2130 with open(deps_file, 'w') as fd:
2131 for dep in sorted(deps):
2132 fd.write(dep + '\n')
2133}
2134
2135python package_do_pkgconfig () {
2136 import re
2137
2138 packages = d.getVar('PACKAGES')
2139 workdir = d.getVar('WORKDIR')
2140 pkgdest = d.getVar('PKGDEST')
2141
2142 shlibs_dirs = d.getVar('SHLIBSDIRS').split()
2143 shlibswork_dir = d.getVar('SHLIBSWORKDIR')
2144
2145 pc_re = re.compile(r'(.*)\.pc$')
2146 var_re = re.compile(r'(.*)=(.*)')
2147 field_re = re.compile(r'(.*): (.*)')
2148
2149 pkgconfig_provided = {}
2150 pkgconfig_needed = {}
2151 for pkg in packages.split():
2152 pkgconfig_provided[pkg] = []
2153 pkgconfig_needed[pkg] = []
2154 for file in sorted(pkgfiles[pkg]):
2155 m = pc_re.match(file)
2156 if m:
2157 pd = bb.data.init()
2158 name = m.group(1)
2159 pkgconfig_provided[pkg].append(os.path.basename(name))
2160 if not os.access(file, os.R_OK):
2161 continue
2162 with open(file, 'r') as f:
2163 lines = f.readlines()
2164 for l in lines:
2165 m = var_re.match(l)
2166 if m:
2167 name = m.group(1)
2168 val = m.group(2)
2169 pd.setVar(name, pd.expand(val))
2170 continue
2171 m = field_re.match(l)
2172 if m:
2173 hdr = m.group(1)
2174 exp = pd.expand(m.group(2))
2175 if hdr == 'Requires':
2176 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
2177
2178 for pkg in packages.split():
2179 pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
2180 if pkgconfig_provided[pkg] != []:
2181 with open(pkgs_file, 'w') as f:
2182 for p in sorted(pkgconfig_provided[pkg]):
2183 f.write('%s\n' % p)
2184
2185 # Go from least to most specific since the last one found wins
2186 for dir in reversed(shlibs_dirs):
2187 if not os.path.exists(dir):
2188 continue
2189 for file in sorted(os.listdir(dir)):
2190 m = re.match(r'^(.*)\.pclist$', file)
2191 if m:
2192 pkg = m.group(1)
2193 with open(os.path.join(dir, file)) as fd:
2194 lines = fd.readlines()
2195 pkgconfig_provided[pkg] = []
2196 for l in lines:
2197 pkgconfig_provided[pkg].append(l.rstrip())
2198
2199 for pkg in packages.split():
2200 deps = []
2201 for n in pkgconfig_needed[pkg]:
2202 found = False
2203 for k in pkgconfig_provided.keys():
2204 if n in pkgconfig_provided[k]:
2205 if k != pkg and k not in deps:
2206 deps.append(k)
2207 found = True
2208 if not found:
2209 bb.note("couldn't find pkgconfig module '%s' in any package" % n)
2210 deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
2211 if len(deps):
2212 with open(deps_file, 'w') as fd:
2213 for dep in deps:
2214 fd.write(dep + '\n')
2215}
2216
2217def read_libdep_files(d):
2218 pkglibdeps = {}
2219 packages = d.getVar('PACKAGES').split()
2220 for pkg in packages:
2221 pkglibdeps[pkg] = {}
2222 for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
2223 depsfile = d.expand("${PKGDEST}/" + pkg + extension)
2224 if os.access(depsfile, os.R_OK):
2225 with open(depsfile) as fd:
2226 lines = fd.readlines()
2227 for l in lines:
2228 l = l.rstrip()
2229 deps = bb.utils.explode_dep_versions2(l)
2230 for dep in deps:
2231 if dep not in pkglibdeps[pkg]:
2232 pkglibdeps[pkg][dep] = deps[dep]
2233 return pkglibdeps
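# The .shlibdeps/.pcdeps files parsed above list one runtime dependency per
# line, optionally versioned, e.g. (illustrative):
#   libfoo1 (>= 1.2.3)
#   bar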
2234
2235python read_shlibdeps () {
2236 pkglibdeps = read_libdep_files(d)
2237
2238 packages = d.getVar('PACKAGES').split()
2239 for pkg in packages:
2240 rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
2241 for dep in sorted(pkglibdeps[pkg]):
2242 # Add the dep if it's not already there, or if no comparison is set
2243 if dep not in rdepends:
2244 rdepends[dep] = []
2245 for v in pkglibdeps[pkg][dep]:
2246 if v not in rdepends[dep]:
2247 rdepends[dep].append(v)
2248 d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
2249}
2250
2251python package_depchains() {
2252 """
2253 For a given set of prefix and postfix modifiers, make those packages
2254 RRECOMMENDS on the corresponding packages for its RDEPENDS.
2255
2256 Example: If package A depends upon package B, and A's .bb emits an
2257 A-dev package, this would make A-dev Recommends: B-dev.
2258
2259 If only one of a given suffix is specified, it will take the RRECOMMENDS
2260 based on the RDEPENDS of *all* other packages. If more than one of a given
2261 suffix is specified, it will only use the RDEPENDS of the single parent
2262 package.
2263 """
2264
2265 packages = d.getVar('PACKAGES')
2266 postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
2267 prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
2268
2269 def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
2270
2271 #bb.note('depends for %s is %s' % (base, depends))
2272 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
2273
2274 for depend in sorted(depends):
2275 if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
2276 #bb.note("Skipping %s" % depend)
2277 continue
2278 if depend.endswith('-dev'):
2279 depend = depend[:-4]
2280 if depend.endswith('-dbg'):
2281 depend = depend[:-4]
2282 pkgname = getname(depend, suffix)
2283 #bb.note("Adding %s for %s" % (pkgname, depend))
2284 if pkgname not in rreclist and pkgname != pkg:
2285 rreclist[pkgname] = []
2286
2287 #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
2288 d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
2289
2290 def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
2291
2292 #bb.note('rdepends for %s is %s' % (base, rdepends))
2293 rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
2294
2295 for depend in sorted(rdepends):
2296 if depend.find('virtual-locale-') != -1:
2297 #bb.note("Skipping %s" % depend)
2298 continue
2299 if depend.endswith('-dev'):
2300 depend = depend[:-4]
2301 if depend.endswith('-dbg'):
2302 depend = depend[:-4]
2303 pkgname = getname(depend, suffix)
2304 #bb.note("Adding %s for %s" % (pkgname, depend))
2305 if pkgname not in rreclist and pkgname != pkg:
2306 rreclist[pkgname] = []
2307
2308 #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
2309 d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
2310
2311 def add_dep(list, dep):
2312 if dep not in list:
2313 list.append(dep)
2314
2315 depends = []
2316 for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
2317 add_dep(depends, dep)
2318
2319 rdepends = []
2320 for pkg in packages.split():
2321 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
2322 add_dep(rdepends, dep)
2323
2324 #bb.note('rdepends is %s' % rdepends)
2325
2326 def post_getname(name, suffix):
2327 return '%s%s' % (name, suffix)
2328 def pre_getname(name, suffix):
2329 return '%s%s' % (suffix, name)
2330
2331 pkgs = {}
2332 for pkg in packages.split():
2333 for postfix in postfixes:
2334 if pkg.endswith(postfix):
2335 if postfix not in pkgs:
2336 pkgs[postfix] = {}
2337 pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
2338
2339 for prefix in prefixes:
2340 if pkg.startswith(prefix):
2341 if prefix not in pkgs:
2342 pkgs[prefix] = {}
2343 pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
2344
2345 if "-dbg" in pkgs:
2346 pkglibdeps = read_libdep_files(d)
2347 pkglibdeplist = []
2348 for pkg in pkglibdeps:
2349 for k in pkglibdeps[pkg]:
2350 add_dep(pkglibdeplist, k)
2351 dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
2352
2353 for suffix in pkgs:
2354 for pkg in pkgs[suffix]:
2355 if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
2356 continue
2357 (base, func) = pkgs[suffix][pkg]
2358 if suffix == "-dev":
2359 pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
2360 elif suffix == "-dbg":
2361 if not dbgdefaultdeps:
2362 pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
2363 continue
2364 if len(pkgs[suffix]) == 1:
2365 pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
2366 else:
2367 rdeps = []
2368 for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
2369 add_dep(rdeps, dep)
2370 pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
2371}
2372
2373# Since bitbake can't determine which variables are accessed during package
2374# iteration, we need to list them here:
2375PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
2376
2377def gen_packagevar(d, pkgvars="PACKAGEVARS"):
2378 ret = []
2379 pkgs = (d.getVar("PACKAGES") or "").split()
2380 vars = (d.getVar(pkgvars) or "").split()
2381 for v in vars:
2382 ret.append(v)
2383 for p in pkgs:
2384 for v in vars:
2385 ret.append(v + ":" + p)
2386
2387 # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
2388 # affected recipes.
2389 ret.append('_exclude_incompatible-%s' % p)
2390 return " ".join(ret)
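# For PACKAGES = "foo foo-dev" (illustrative) this returns entries such as
# "FILES", "FILES:foo", "RDEPENDS:foo-dev" and "_exclude_incompatible-foo",
# which do_package[vardeps] below uses so per-package variable changes
# retrigger packaging.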
2391
2392PACKAGE_PREPROCESS_FUNCS ?= ""
2393# Functions for setting up PKGD
2394PACKAGEBUILDPKGD ?= " \
2395 package_prepare_pkgdata \
2396 perform_packagecopy \
2397 ${PACKAGE_PREPROCESS_FUNCS} \
2398 split_and_strip_files \
2399 fixup_perms \
2400 "
2401# Functions which split PKGD up into separate packages
2402PACKAGESPLITFUNCS ?= " \
2403 package_do_split_locales \
2404 populate_packages"
2405# Functions which process metadata based on split packages
2406PACKAGEFUNCS += " \
2407 package_fixsymlinks \
2408 package_name_hook \
2409 package_do_filedeps \
2410 package_do_shlibs \
2411 package_do_pkgconfig \
2412 read_shlibdeps \
2413 package_depchains \
2414 emit_pkgdata"
2415
2416python do_package () {
2417 # Change the following version to cause sstate to invalidate the package
2418 # cache. This is useful if an item this class depends on changes in a
2419 # way that the output of this class changes. rpmdeps is a good example
2420 # as any change to rpmdeps requires this to be rerun.
2421 # PACKAGE_BBCLASS_VERSION = "4"
2422
2423 # Init cachedpath
2424 global cpath
2425 cpath = oe.cachedpath.CachedPath()
2426
2427 ###########################################################################
2428 # Sanity test the setup
2429 ###########################################################################
2430
2431 packages = (d.getVar('PACKAGES') or "").split()
2432 if len(packages) < 1:
2433 bb.debug(1, "No packages to build, skipping do_package")
2434 return
2435
2436 workdir = d.getVar('WORKDIR')
2437 outdir = d.getVar('DEPLOY_DIR')
2438 dest = d.getVar('D')
2439 dvar = d.getVar('PKGD')
2440 pn = d.getVar('PN')
2441
2442 if not workdir or not outdir or not dest or not dvar or not pn:
2443 msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
2444 oe.qa.handle_error("var-undefined", msg, d)
2445 return
2446
2447 bb.build.exec_func("package_convert_pr_autoinc", d)
2448
2449 ###########################################################################
2450 # Optimisations
2451 ###########################################################################
2452
2453 # Continually expanding complex expressions is inefficient, particularly
2454 # when we write to the datastore and invalidate the expansion cache. This
2455 # code pre-expands some frequently used variables
2456
2457 def expandVar(x, d):
2458 d.setVar(x, d.getVar(x))
2459
2460 for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
2461 expandVar(x, d)
2462
2463 ###########################################################################
2464 # Setup PKGD (from D)
2465 ###########################################################################
2466
2467 for f in (d.getVar('PACKAGEBUILDPKGD') or '').split():
2468 bb.build.exec_func(f, d)
2469
2470 ###########################################################################
2471 # Split up PKGD into PKGDEST
2472 ###########################################################################
2473
2474 cpath = oe.cachedpath.CachedPath()
2475
2476 for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
2477 bb.build.exec_func(f, d)
2478
2479 ###########################################################################
2480 # Process PKGDEST
2481 ###########################################################################
2482
2483 # Build global list of files in each split package
2484 global pkgfiles
2485 pkgfiles = {}
2486 packages = d.getVar('PACKAGES').split()
2487 pkgdest = d.getVar('PKGDEST')
2488 for pkg in packages:
2489 pkgfiles[pkg] = []
2490 for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
2491 for file in files:
2492 pkgfiles[pkg].append(walkroot + os.sep + file)
2493
2494 for f in (d.getVar('PACKAGEFUNCS') or '').split():
2495 bb.build.exec_func(f, d)
2496
2497 oe.qa.exit_if_errors(d)
2498}
2499
2500do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
2501do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
2502addtask package after do_install
2503
2504SSTATETASKS += "do_package"
2505do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
2506do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
2507do_package_setscene[dirs] = "${STAGING_DIR}"
2508
2509python do_package_setscene () {
2510 sstate_setscene(d)
2511}
2512addtask do_package_setscene
2513
2514# Copy from PKGDESTWORK to a temporary directory, as PKGDESTWORK can be cleaned
2515# by both do_package_setscene and do_packagedata_setscene, leading to races
2516python do_packagedata () {
2517 bb.build.exec_func("package_get_auto_pr", d)
2518
2519 src = d.expand("${PKGDESTWORK}")
2520 dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
2521 oe.path.copyhardlinktree(src, dest)
2522
2523 bb.build.exec_func("packagedata_translate_pr_autoinc", d)
2524}
2525do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
2526
2527# Translate the EXTENDPRAUTO and AUTOINC to the final values
2528packagedata_translate_pr_autoinc() {
2529 find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
2530 sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
2531 -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
2532}
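# For example (values illustrative), a stored "PKGV: 1.0+git@PRSERV_PV_AUTOINC@"
# becomes "PKGV: 1.0+git0" once the PR service has resolved PRSERV_PV_AUTOINC.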
2533
2534addtask packagedata before do_build after do_package
2535
2536SSTATETASKS += "do_packagedata"
2537do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
2538do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
2539do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
2540
2541python do_packagedata_setscene () {
2542 sstate_setscene(d)
2543}
2544addtask do_packagedata_setscene
2545
2546#
2547# Helper functions for the package writing classes
2548#
2549
2550def mapping_rename_hook(d):
2551 """
2552 Rewrite variables to account for package renaming in things
2553 like debian.bbclass or manual PKG variable name changes
2554 """
2555 pkg = d.getVar("PKG")
2556 runtime_mapping_rename("RDEPENDS", pkg, d)
2557 runtime_mapping_rename("RRECOMMENDS", pkg, d)
2558 runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/classes-global/package_deb.bbclass b/meta/classes-global/package_deb.bbclass
new file mode 100644
index 0000000000..ec7e10dbc9
--- /dev/null
+++ b/meta/classes-global/package_deb.bbclass
@@ -0,0 +1,329 @@
1#
2# Copyright 2006-2008 OpenedHand Ltd.
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "deb"
10
11DPKG_BUILDCMD ??= "dpkg-deb"
12
13DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
14DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
15
16PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
17
18APTCONF_TARGET = "${WORKDIR}"
19
20APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
21
22def debian_arch_map(arch, tune):
23 tune_features = tune.split()
24 if arch == "allarch":
25 return "all"
26 if arch in ["i586", "i686"]:
27 return "i386"
28 if arch == "x86_64":
29 if "mx32" in tune_features:
30 return "x32"
31 return "amd64"
32 if arch.startswith("mips"):
33 endian = ["el", ""]["bigendian" in tune_features]
34 if "n64" in tune_features:
35 return "mips64" + endian
36 if "n32" in tune_features:
37 return "mipsn32" + endian
38 return "mips" + endian
39 if arch == "powerpc":
40 return arch + ["", "spe"]["spe" in tune_features]
41 if arch == "aarch64":
42 return "arm64"
43 if arch == "arm":
44 return arch + ["el", "hf"]["callconvention-hard" in tune_features]
45 return arch
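# A few illustrative mappings: ("x86_64", "") -> "amd64",
# ("arm", "callconvention-hard") -> "armhf", ("aarch64", "") -> "arm64".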
46
47python do_package_deb () {
48 packages = d.getVar('PACKAGES')
49 if not packages:
50 bb.debug(1, "PACKAGES not defined, nothing to package")
51 return
52
53 tmpdir = d.getVar('TMPDIR')
54 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
55 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
56
57 oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
58}
59do_package_deb[vardeps] += "deb_write_pkg"
60do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
61
62def deb_write_pkg(pkg, d):
63 import re, copy
64 import textwrap
65 import subprocess
66 import collections
67 import codecs
68
69 outdir = d.getVar('PKGWRITEDIRDEB')
70 pkgdest = d.getVar('PKGDEST')
71
72 def cleanupcontrol(root):
73 for p in ['CONTROL', 'DEBIAN']:
74 p = os.path.join(root, p)
75 if os.path.exists(p):
76 bb.utils.prunedir(p)
77
78 localdata = bb.data.createCopy(d)
79 root = "%s/%s" % (pkgdest, pkg)
80
81 lf = bb.utils.lockfile(root + ".lock")
82 try:
83
84 localdata.setVar('ROOT', '')
85 localdata.setVar('ROOT_%s' % pkg, root)
86 pkgname = localdata.getVar('PKG:%s' % pkg)
87 if not pkgname:
88 pkgname = pkg
89 localdata.setVar('PKG', pkgname)
90
91 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
92
93 basedir = os.path.dirname(root)
94
95 pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
96 bb.utils.mkdirhier(pkgoutdir)
97
98 os.chdir(root)
99 cleanupcontrol(root)
100 from glob import glob
101 g = glob('*')
102 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
103 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
104 return
105
106 controldir = os.path.join(root, 'DEBIAN')
107 bb.utils.mkdirhier(controldir)
108 os.chmod(controldir, 0o755)
109
110 ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
111
112 fields = []
113 pe = d.getVar('PKGE')
114 if pe and int(pe) > 0:
115 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
116 else:
117 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
118 fields.append(["Description: %s\n", ['DESCRIPTION']])
119 fields.append(["Section: %s\n", ['SECTION']])
120 fields.append(["Priority: %s\n", ['PRIORITY']])
121 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
122 fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
123 fields.append(["OE: %s\n", ['PN']])
124 fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
125 if d.getVar('HOMEPAGE'):
126 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
127
128 # Package, Version, Maintainer, Description - mandatory
129 # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
130
131
132 def pullData(l, d):
133 l2 = []
134 for i in l:
135 data = d.getVar(i)
136 if data is None:
137 raise KeyError(i)
138 if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
139 data = 'all'
140 elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
141 # Fields in a deb control file cannot contain the `_'
142 # character, so change any `_' in the arch to `-', e.g.
143 # `x86_64' becomes `x86-64'
144 data = data.replace('_', '-')
145 l2.append(data)
146 return l2
147
148 ctrlfile.write("Package: %s\n" % pkgname)
149 if d.getVar('PACKAGE_ARCH') == "all":
150 ctrlfile.write("Multi-Arch: foreign\n")
151 # check for required fields
152 for (c, fs) in fields:
153 # Special behavior for description...
154 if 'DESCRIPTION' in fs:
155 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
156 ctrlfile.write('Description: %s\n' % summary)
157 description = localdata.getVar('DESCRIPTION') or "."
158 description = textwrap.dedent(description).strip()
159 if '\\n' in description:
160 # Manually indent
161 for t in description.split('\\n'):
162 ctrlfile.write(' %s\n' % (t.strip() or '.'))
163 else:
164 # Auto indent
165 ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
166
167 else:
168 ctrlfile.write(c % tuple(pullData(fs, localdata)))
169
170 # more fields
171
172 custom_fields_chunk = get_package_additional_metadata("deb", localdata)
173 if custom_fields_chunk:
174 ctrlfile.write(custom_fields_chunk)
175 ctrlfile.write("\n")
176
177 mapping_rename_hook(localdata)
178
179 def debian_cmp_remap(var):
180 # dpkg does not allow '(', ')', ':' or '/' in a dependency name;
181 # replace any instances of them with '__'
182 #
183 # In debian, '>' and '<' do not mean what they appear to mean:
184 # '<' = less than or equal
185 # '>' = greater than or equal
186 # so adjust these to their '<<' and '>>' equivalents.
187 # Also, "=" specifiers only match when they include the PR, so 1.2.3 != 1.2.3-r0;
188 # to avoid issues, map "= 1.2.3" to ">= 1.2.3 << 1.2.3.0"
189 for dep in list(var.keys()):
190 if '(' in dep or '/' in dep:
191 newdep = re.sub(r'[(:)/]', '__', dep)
192 if newdep.startswith("__"):
193 newdep = "A" + newdep
194 if newdep != dep:
195 var[newdep] = var[dep]
196 del var[dep]
197 for dep in var:
198 for i, v in enumerate(var[dep]):
199 if (v or "").startswith("< "):
200 var[dep][i] = var[dep][i].replace("< ", "<< ")
201 elif (v or "").startswith("> "):
202 var[dep][i] = var[dep][i].replace("> ", ">> ")
203 elif (v or "").startswith("= ") and "-r" not in v:
204 ver = var[dep][i].replace("= ", "")
205 var[dep][i] = var[dep][i].replace("= ", ">= ")
206 var[dep].append("<< " + ver + ".0")
207
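# Worked example of debian_cmp_remap(), for illustration: an input mapping of
#   {"foo": ["= 1.2.3"]}
# becomes
#   {"foo": [">= 1.2.3", "<< 1.2.3.0"]}
# while {"bar": ["> 2.0"]} becomes {"bar": [">> 2.0"]}.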
208 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
209 debian_cmp_remap(rdepends)
210 for dep in list(rdepends.keys()):
211 if dep == pkg:
212 del rdepends[dep]
213 continue
214 if '*' in dep:
215 del rdepends[dep]
216 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
217 debian_cmp_remap(rrecommends)
218 for dep in list(rrecommends.keys()):
219 if '*' in dep:
220 del rrecommends[dep]
221 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
222 debian_cmp_remap(rsuggests)
223 # Deliberately drop version information here, not wanted/supported by deb
224 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
225 # Remove any file paths from rprovides; debian does not support custom providers
226 for key in list(rprovides.keys()):
227 if key.startswith('/'):
228 del rprovides[key]
229 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
230 debian_cmp_remap(rprovides)
231 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
232 debian_cmp_remap(rreplaces)
233 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
234 debian_cmp_remap(rconflicts)
235 if rdepends:
236 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
237 if rsuggests:
238 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
239 if rrecommends:
240 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
241 if rprovides:
242 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
243 if rreplaces:
244 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
245 if rconflicts:
246 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
247 ctrlfile.close()
248
249 for script in ["preinst", "postinst", "prerm", "postrm"]:
250 scriptvar = localdata.getVar('pkg_%s' % script)
251 if not scriptvar:
252 continue
253 scriptvar = scriptvar.strip()
254 scriptfile = open(os.path.join(controldir, script), 'w')
255
256 if scriptvar.startswith("#!"):
257 pos = scriptvar.find("\n") + 1
258 scriptfile.write(scriptvar[:pos])
259 else:
260 pos = 0
261 scriptfile.write("#!/bin/sh\n")
262
263 # Prevent the prerm/postrm scripts from being run during an upgrade
264 if script in ('prerm', 'postrm'):
265 scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
266
267 scriptfile.write(scriptvar[pos:])
268 scriptfile.write('\n')
269 scriptfile.close()
270 os.chmod(os.path.join(controldir, script), 0o755)
271
272 conffiles_str = ' '.join(get_conffiles(pkg, d))
273 if conffiles_str:
274 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
275 for f in conffiles_str.split():
276 if os.path.exists(oe.path.join(root, f)):
277 conffiles.write('%s\n' % f)
278 conffiles.close()
279
280 os.chdir(basedir)
281 subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
282 root, pkgoutdir),
283 stderr=subprocess.STDOUT,
284 shell=True)
285
286 finally:
287 cleanupcontrol(root)
288 bb.utils.unlockfile(lf)
289
290# Otherwise allarch packages may change depending on override configuration
291deb_write_pkg[vardepsexclude] = "OVERRIDES"
292
293# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
294DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
295do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
296
297SSTATETASKS += "do_package_write_deb"
298do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
299do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
300
301python do_package_write_deb_setscene () {
302 tmpdir = d.getVar('TMPDIR')
303
304 if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), os.R_OK):
305 os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
306
307 sstate_setscene(d)
308}
309addtask do_package_write_deb_setscene
310
311python () {
312 if d.getVar('PACKAGES') != '':
313 deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
314 d.appendVarFlag('do_package_write_deb', 'depends', deps)
315 d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
316}
317
318python do_package_write_deb () {
319 bb.build.exec_func("read_subpackage_metadata", d)
320 bb.build.exec_func("do_package_deb", d)
321}
322do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
323do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
324do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
325addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
326do_build[rdeptask] += "do_package_write_deb"
327
328PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
329PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes-global/package_ipk.bbclass b/meta/classes-global/package_ipk.bbclass
new file mode 100644
index 0000000000..c43592af7e
--- /dev/null
+++ b/meta/classes-global/package_ipk.bbclass
@@ -0,0 +1,292 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "ipk"
10
11IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
12IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
13IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
14
15PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
16
17# Program to be used to build opkg packages
18OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
19
20OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
21OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
22OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
23
24OPKGLIBDIR ??= "${localstatedir}/lib"
25
26python do_package_ipk () {
27 workdir = d.getVar('WORKDIR')
28 outdir = d.getVar('PKGWRITEDIRIPK')
29 tmpdir = d.getVar('TMPDIR')
30 pkgdest = d.getVar('PKGDEST')
31 if not workdir or not outdir or not tmpdir:
32 bb.error("Variables incorrectly set, unable to package")
33 return
34
35 packages = d.getVar('PACKAGES')
36 if not packages:
37 bb.debug(1, "No packages; nothing to do")
38 return
39
40 # We're about to add new packages so the index needs to be checked
41 # so remove the appropriate stamp file.
42 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
43 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
44
45 oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
46}
47do_package_ipk[vardeps] += "ipk_write_pkg"
48do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
49
50def ipk_write_pkg(pkg, d):
51 import re, copy
52 import subprocess
53 import textwrap
54 import collections
55 import glob
56
57 def cleanupcontrol(root):
58 for p in ['CONTROL', 'DEBIAN']:
59 p = os.path.join(root, p)
60 if os.path.exists(p):
61 bb.utils.prunedir(p)
62
63 outdir = d.getVar('PKGWRITEDIRIPK')
64 pkgdest = d.getVar('PKGDEST')
65 recipesource = os.path.basename(d.getVar('FILE'))
66
67 localdata = bb.data.createCopy(d)
68 root = "%s/%s" % (pkgdest, pkg)
69
70 lf = bb.utils.lockfile(root + ".lock")
71 try:
72 localdata.setVar('ROOT', '')
73 localdata.setVar('ROOT_%s' % pkg, root)
74 pkgname = localdata.getVar('PKG:%s' % pkg)
75 if not pkgname:
76 pkgname = pkg
77 localdata.setVar('PKG', pkgname)
78
79 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
80
81 basedir = os.path.dirname(root)
82 arch = localdata.getVar('PACKAGE_ARCH')
83
84 if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
85 # Spread packages across subdirectories so each isn't too crowded
86 if pkgname.startswith('lib'):
87 pkg_prefix = 'lib' + pkgname[3]
88 else:
89 pkg_prefix = pkgname[0]
90
91 # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
92 # together. These package suffixes are taken from the definitions of
93 # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
94 if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
95 pkg_subdir = pkgname[:-4]
96 elif pkgname.endswith('-staticdev'):
97 pkg_subdir = pkgname[:-10]
98 elif pkgname.endswith('-locale'):
99 pkg_subdir = pkgname[:-7]
100 elif '-locale-' in pkgname:
101 pkg_subdir = pkgname[:pkgname.find('-locale-')]
102 else:
103 pkg_subdir = pkgname
104
105 pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
106 else:
107 pkgoutdir = "%s/%s" % (outdir, arch)
108
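# Example layout (assumed package/arch names, for illustration): with
# IPK_HIERARCHICAL_FEED enabled, "libfoo-dev" for arch "armv7a" lands in
#   ${PKGWRITEDIRIPK}/armv7a/libf/libfoo/
# whereas the flat layout puts it directly in ${PKGWRITEDIRIPK}/armv7a/.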
109 bb.utils.mkdirhier(pkgoutdir)
110 os.chdir(root)
111 cleanupcontrol(root)
112 g = glob.glob('*')
113 if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
114 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
115 return
116
117 controldir = os.path.join(root, 'CONTROL')
118 bb.utils.mkdirhier(controldir)
119 ctrlfile = open(os.path.join(controldir, 'control'), 'w')
120
121 fields = []
122 pe = d.getVar('PKGE')
123 if pe and int(pe) > 0:
124 fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
125 else:
126 fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
127 fields.append(["Description: %s\n", ['DESCRIPTION']])
128 fields.append(["Section: %s\n", ['SECTION']])
129 fields.append(["Priority: %s\n", ['PRIORITY']])
130 fields.append(["Maintainer: %s\n", ['MAINTAINER']])
131 fields.append(["License: %s\n", ['LICENSE']])
132 fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
133 fields.append(["OE: %s\n", ['PN']])
134 if d.getVar('HOMEPAGE'):
135 fields.append(["Homepage: %s\n", ['HOMEPAGE']])
136
137 def pullData(l, d):
138 l2 = []
139 for i in l:
140 l2.append(d.getVar(i))
141 return l2
142
143 ctrlfile.write("Package: %s\n" % pkgname)
144 # check for required fields
145 for (c, fs) in fields:
146 for f in fs:
147 if localdata.getVar(f, False) is None:
148 raise KeyError(f)
149 # Special behavior for description...
150 if 'DESCRIPTION' in fs:
151 summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
152 ctrlfile.write('Description: %s\n' % summary)
153 description = localdata.getVar('DESCRIPTION') or "."
154 description = textwrap.dedent(description).strip()
155 if '\\n' in description:
156 # Manually indent: multiline description includes a leading space
157 for t in description.split('\\n'):
158 ctrlfile.write(' %s\n' % (t.strip() or ' .'))
159 else:
160 # Auto indent
161 ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
162 else:
163 ctrlfile.write(c % tuple(pullData(fs, localdata)))
164
165 custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
166 if custom_fields_chunk is not None:
167 ctrlfile.write(custom_fields_chunk)
168 ctrlfile.write("\n")
169
170 mapping_rename_hook(localdata)
171
172 def debian_cmp_remap(var):
173 # In debian, '>' and '<' do not mean what they appear to mean:
174 # '<' = less than or equal
175 # '>' = greater than or equal
176 # so adjust these to their '<<' and '>>' equivalents.
177 # Also, "=" specifiers only match when they include the PR, so 1.2.3 != 1.2.3-r0;
178 # to avoid issues, map "= 1.2.3" to ">= 1.2.3 << 1.2.3.0"
179 for dep in var:
180 for i, v in enumerate(var[dep]):
181 if (v or "").startswith("< "):
182 var[dep][i] = var[dep][i].replace("< ", "<< ")
183 elif (v or "").startswith("> "):
184 var[dep][i] = var[dep][i].replace("> ", ">> ")
185 elif (v or "").startswith("= ") and "-r" not in v:
186 ver = var[dep][i].replace("= ", "")
187 var[dep][i] = var[dep][i].replace("= ", ">= ")
188 var[dep].append("<< " + ver + ".0")
189
190 rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
191 debian_cmp_remap(rdepends)
192 rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
193 debian_cmp_remap(rrecommends)
194 rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
195 debian_cmp_remap(rsuggests)
196 # Deliberately drop version information here, not wanted/supported by ipk
197 rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
198 rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
199 debian_cmp_remap(rprovides)
200 rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
201 debian_cmp_remap(rreplaces)
202 rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
203 debian_cmp_remap(rconflicts)
204
205 if rdepends:
206 ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
207 if rsuggests:
208 ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
209 if rrecommends:
210 ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
211 if rprovides:
212 ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
213 if rreplaces:
214 ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
215 if rconflicts:
216 ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
217 ctrlfile.write("Source: %s\n" % recipesource)
218 ctrlfile.close()
219
220 for script in ["preinst", "postinst", "prerm", "postrm"]:
221 scriptvar = localdata.getVar('pkg_%s' % script)
222 if not scriptvar:
223 continue
224 scriptfile = open(os.path.join(controldir, script), 'w')
225 scriptfile.write(scriptvar)
226 scriptfile.close()
227 os.chmod(os.path.join(controldir, script), 0o755)
228
229 conffiles_str = ' '.join(get_conffiles(pkg, d))
230 if conffiles_str:
231 conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
232 for f in conffiles_str.split():
233 if os.path.exists(oe.path.join(root, f)):
234 conffiles.write('%s\n' % f)
235 conffiles.close()
236
237 os.chdir(basedir)
238 subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
239 d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
240 stderr=subprocess.STDOUT,
241 shell=True)
242
243 if d.getVar('IPK_SIGN_PACKAGES') == '1':
244 ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
245 ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
246 sign_ipk(d, ipk_to_sign)
247
248 finally:
249 cleanupcontrol(root)
250 bb.utils.unlockfile(lf)
251
252# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
253IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
254ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
255
256# Otherwise allarch packages may change depending on override configuration
257ipk_write_pkg[vardepsexclude] = "OVERRIDES"
258
259
260SSTATETASKS += "do_package_write_ipk"
261do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
262do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
263
264python do_package_write_ipk_setscene () {
265 tmpdir = d.getVar('TMPDIR')
266
267 if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
268 os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
269
270 sstate_setscene(d)
271}
272addtask do_package_write_ipk_setscene
273
274python () {
275 if d.getVar('PACKAGES') != '':
276 deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot xz-native:do_populate_sysroot'
277 d.appendVarFlag('do_package_write_ipk', 'depends', deps)
278 d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
279}
280
281python do_package_write_ipk () {
282 bb.build.exec_func("read_subpackage_metadata", d)
283 bb.build.exec_func("do_package_ipk", d)
284}
285do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
286do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
287do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
288addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
289do_build[rdeptask] += "do_package_write_ipk"
290
291PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
292PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes-global/package_pkgdata.bbclass b/meta/classes-global/package_pkgdata.bbclass
new file mode 100644
index 0000000000..f653bd9240
--- /dev/null
+++ b/meta/classes-global/package_pkgdata.bbclass
@@ -0,0 +1,173 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
8
9def package_populate_pkgdata_dir(pkgdatadir, d):
10 import glob
11
12 postinsts = []
13 seendirs = set()
14 stagingdir = d.getVar("PKGDATA_DIR")
15 pkgarchs = ['${MACHINE_ARCH}']
16 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
17 pkgarchs.append('allarch')
18
19 bb.utils.mkdirhier(pkgdatadir)
20 for pkgarch in pkgarchs:
21 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
22 with open(manifest, "r") as f:
23 for l in f:
24 l = l.strip()
25 dest = l.replace(stagingdir, "")
26 if l.endswith("/"):
27 staging_copydir(l, pkgdatadir, dest, seendirs)
28 continue
29 try:
30 staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
31 except FileExistsError:
32 continue
33
34python package_prepare_pkgdata() {
35 import copy
36 import glob
37
38 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
39 mytaskname = d.getVar("BB_RUNTASK")
40 if mytaskname.endswith("_setscene"):
41 mytaskname = mytaskname.replace("_setscene", "")
42 workdir = d.getVar("WORKDIR")
43 pn = d.getVar("PN")
44 stagingdir = d.getVar("PKGDATA_DIR")
45 pkgdatadir = d.getVar("WORKDIR_PKGDATA")
46
47 # Detect bitbake -b usage
48 nodeps = d.getVar("BB_LIMITEDDEPS") or False
49 if nodeps:
50 package_populate_pkgdata_dir(pkgdatadir, d)
51 return
52
53 start = None
54 configuredeps = []
55 for dep in taskdepdata:
56 data = taskdepdata[dep]
57 if data[1] == mytaskname and data[0] == pn:
58 start = dep
59 break
60 if start is None:
61 bb.fatal("Couldn't find ourselves in BB_TASKDEPDATA?")
62
63 # We need to figure out which sysroot files we need to expose to this task.
64 # This needs to match what would get restored from sstate, which is controlled
65 # ultimately by calls from bitbake to setscene_depvalid().
66 # That function expects a setscene dependency tree. We build a dependency tree
67 # condensed to inter-sstate task dependencies, similar to that used by setscene
68 # tasks. We can then call into setscene_depvalid() and decide
69 # which dependencies we can "see" and should expose in the recipe specific sysroot.
70 setscenedeps = copy.deepcopy(taskdepdata)
71
72 start = set([start])
73
74 sstatetasks = d.getVar("SSTATETASKS").split()
75 # Add recipe specific tasks referenced by setscene_depvalid()
76 sstatetasks.append("do_stash_locale")
77
78 # If start is an sstate task (like do_package) we need to add in its direct dependencies
79 # else the code below won't recurse into them.
80 for dep in set(start):
81 for dep2 in setscenedeps[dep][3]:
82 start.add(dep2)
83 start.remove(dep)
84
85 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
86 for dep in taskdepdata:
87 data = setscenedeps[dep]
88 if data[1] not in sstatetasks:
89 for dep2 in setscenedeps:
90 data2 = setscenedeps[dep2]
91 if dep in data2[3]:
92 data2[3].update(setscenedeps[dep][3])
93 data2[3].remove(dep)
94 if dep in start:
95 start.update(setscenedeps[dep][3])
96 start.remove(dep)
97 del setscenedeps[dep]
98
99 # Remove circular references
100 for dep in setscenedeps:
101 if dep in setscenedeps[dep][3]:
102 setscenedeps[dep][3].remove(dep)
103
104 # Direct dependencies should be present and can be depended upon
105 for dep in set(start):
106 if setscenedeps[dep][1] == "do_packagedata":
107 if dep not in configuredeps:
108 configuredeps.append(dep)
109
110 msgbuf = []
111 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
112 # for ones that would be restored from sstate.
113 done = list(start)
114 next = list(start)
115 while next:
116 new = []
117 for dep in next:
118 data = setscenedeps[dep]
119 for datadep in data[3]:
120 if datadep in done:
121 continue
122 taskdeps = {}
123 taskdeps[dep] = setscenedeps[dep][:2]
124 taskdeps[datadep] = setscenedeps[datadep][:2]
125 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
126 done.append(datadep)
127 new.append(datadep)
128 if retval:
129 msgbuf.append("Skipping setscene dependency %s" % datadep)
130 continue
131 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
132 configuredeps.append(datadep)
133 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
134 else:
135 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
136 next = new
137
138 # This logging is sadly too verbose for day-to-day use
139 #bb.debug(2, "\n".join(msgbuf))
140
141 seendirs = set()
142 postinsts = []
143 multilibs = {}
144 manifests = {}
145
146 msg_adding = []
147
148 for dep in configuredeps:
149 c = setscenedeps[dep][0]
150 msg_adding.append(c)
151
152 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
153 destsysroot = pkgdatadir
154
155 if manifest:
156 targetdir = destsysroot
157 with open(manifest, "r") as f:
158 manifests[dep] = manifest
159 for l in f:
160 l = l.strip()
161 dest = targetdir + l.replace(stagingdir, "")
162 if l.endswith("/"):
163 staging_copydir(l, targetdir, dest, seendirs)
164 continue
165 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
166
167 bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
168
169}
170package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
171package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
172
173
diff --git a/meta/classes-global/package_rpm.bbclass b/meta/classes-global/package_rpm.bbclass
new file mode 100644
index 0000000000..63c1b077a3
--- /dev/null
+++ b/meta/classes-global/package_rpm.bbclass
@@ -0,0 +1,761 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "rpm"
10
11RPM = "rpm"
12RPMBUILD = "rpmbuild"
13
14PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
15
16# Maintaining the per-file dependencies has significant overhead when writing the
17# packages. When set, this value merges them for efficiency.
18MERGEPERFILEDEPS = "1"
19
20# Filter dependencies based on a provided function.
21def filter_deps(var, f):
22 import collections
23
24 depends_dict = bb.utils.explode_dep_versions2(var)
25 newdeps_dict = collections.OrderedDict()
26 for dep in depends_dict:
27 if f(dep):
28 newdeps_dict[dep] = depends_dict[dep]
29 return bb.utils.join_deps(newdeps_dict, commasep=False)
30
31# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
32# dependencies for nativesdk packages.
33def filter_nativesdk_deps(srcname, var):
34 if var and srcname.startswith("nativesdk-"):
35 var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
36 return var
37
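# Worked example, for illustration: for a nativesdk recipe,
#   filter_nativesdk_deps("nativesdk-foo", "/bin/sh perl bar")
# returns "bar"; for any other srcname the value is returned unchanged.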
38# Construct per file dependencies file
39def write_rpm_perfiledata(srcname, d):
40 workdir = d.getVar('WORKDIR')
41 packages = d.getVar('PACKAGES')
42 pkgd = d.getVar('PKGD')
43
44 def dump_filerdeps(varname, outfile, d):
45 outfile.write("#!/usr/bin/env python3\n\n")
46 outfile.write("# Dependency table\n")
47 outfile.write('deps = {\n')
48 for pkg in packages.split():
49 dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
50 dependsflist = (d.getVar(dependsflist_key) or "")
51 for dfile in dependsflist.split():
52 key = "FILE" + varname + ":" + dfile + ":" + pkg
53 deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
54 depends_dict = bb.utils.explode_dep_versions(deps)
55 file = dfile.replace("@underscore@", "_")
56 file = file.replace("@closebrace@", "]")
57 file = file.replace("@openbrace@", "[")
58 file = file.replace("@tab@", "\t")
59 file = file.replace("@space@", " ")
60 file = file.replace("@at@", "@")
61 outfile.write('"' + pkgd + file + '" : "')
62 for dep in depends_dict:
63 ver = depends_dict[dep]
64 if dep and ver:
65 ver = ver.replace("(","")
66 ver = ver.replace(")","")
67 outfile.write(dep + " " + ver + " ")
68 else:
69 outfile.write(dep + " ")
70 outfile.write('",\n')
71 outfile.write('}\n\n')
72 outfile.write("import sys\n")
73 outfile.write("while 1:\n")
74 outfile.write("\tline = sys.stdin.readline().strip()\n")
75 outfile.write("\tif not line:\n")
76 outfile.write("\t\tsys.exit(0)\n")
77 outfile.write("\tif line in deps:\n")
78 outfile.write("\t\tprint(deps[line] + '\\n')\n")
79
80 # OE-core dependencies a.k.a. RPM requires
81 outdepends = workdir + "/" + srcname + ".requires"
82
83 dependsfile = open(outdepends, 'w')
84
85 dump_filerdeps('RDEPENDS', dependsfile, d)
86
87 dependsfile.close()
88 os.chmod(outdepends, 0o755)
89
90 # OE-core / RPM Provides
91 outprovides = workdir + "/" + srcname + ".provides"
92
93 providesfile = open(outprovides, 'w')
94
95 dump_filerdeps('RPROVIDES', providesfile, d)
96
97 providesfile.close()
98 os.chmod(outprovides, 0o755)
99
100 return (outdepends, outprovides)
101
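# The two generated scripts act as rpm's external dependency generators:
# rpmbuild feeds each packaged filename on stdin and reads that file's
# requires/provides back on stdout (wired up via the __find_requires /
# __find_provides defines in do_package_rpm below). A generated .requires
# script contains roughly (illustrative path and deps):
#   deps = {"<PKGD>/usr/bin/foo" : "libc.so.6 bar >= 1.0 "}
# followed by the stdin/stdout loop written above.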
102
103python write_specfile () {
104 import oe.packagedata
105
106 # Append information about logs and patches to %prep
107 def add_prep(d,spec_files_bottom):
108 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
109 spec_files_bottom.append('%%prep -n %s' % d.getVar('PN'))
110 spec_files_bottom.append('echo "include logs and patches, please check them in SOURCES"')
111 spec_files_bottom.append('')
112
113 # Append the name of the tarball to the 'Source' keyword in the .spec file.
114 def tail_source(d):
115 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
116 ar_outdir = d.getVar('ARCHIVER_OUTDIR')
117 if not os.path.exists(ar_outdir):
118 return
119 source_list = os.listdir(ar_outdir)
120 source_number = 0
121 for source in source_list:
122 # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
123 # exist in ARCHIVER_OUTDIR so skip if present.
124 if source.endswith(".src.rpm"):
125 continue
126 # rpmbuild does not need root permission, but it does need to
127 # know each file's user and group name; the only user and group
128 # available when working in fakeroot is "root".
129 f = os.path.join(ar_outdir, source)
130 os.chown(f, 0, 0)
131 spec_preamble_top.append('Source%s: %s' % (source_number, source))
132 source_number += 1
133
134 # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
135 # This format is similar to OE's; however, there are restrictions on the
136 # characters that can be in a field. In the Version field, "-"
137 # characters are not allowed. "-" is allowed in the Release field.
138 #
139 # We translate the "-" in the version to a "+", by loading the PKGV
140 # from the dependent recipe, replacing the - with a +, and then using
141 # that value to do a replace inside of this recipe's dependencies.
142 # This preserves the "-" separator between the version and release, as
143 # well as any "-" characters inside of the release field.
144 #
145 # All of this has to happen BEFORE the mapping_rename_hook as
146 # after renaming we cannot look up the dependencies in the packagedata
147 # store.
148 def translate_vers(varname, d):
149 depends = d.getVar(varname)
150 if depends:
151 depends_dict = bb.utils.explode_dep_versions2(depends)
152 newdeps_dict = {}
153 for dep in depends_dict:
154 verlist = []
155 for ver in depends_dict[dep]:
156 if '-' in ver:
157 subd = oe.packagedata.read_subpkgdata_dict(dep, d)
158 if 'PKGV' in subd:
159 pv = subd['PV']
160 pkgv = subd['PKGV']
161 reppv = pkgv.replace('-', '+')
162 ver = ver.replace(pv, reppv).replace(pkgv, reppv)
163 if 'PKGR' in subd:
164 # Make sure PKGR rather than PR in ver
165 pr = '-' + subd['PR']
166 pkgr = '-' + subd['PKGR']
167 if pkgr not in ver:
168 ver = ver.replace(pr, pkgr)
169 verlist.append(ver)
170 else:
171 verlist.append(ver)
172 newdeps_dict[dep] = verlist
173 depends = bb.utils.join_deps(newdeps_dict)
174 d.setVar(varname, depends.strip())
175
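# Worked example of translate_vers(), for illustration: if a dependent recipe
# has PKGV = "2.0-beta" (assumed), a dependency such as
#   foo (>= 2.0-beta-r0)
# is rewritten to
#   foo (>= 2.0+beta-r0)
# leaving "-" only as the version/release separator.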
176 # We need to change the style of the dependencies from BitBake to RPM.
177 # This needs to happen AFTER the mapping_rename_hook.
178 def print_deps(variable, tag, array, d):
179 depends = variable
180 if depends:
181 depends_dict = bb.utils.explode_dep_versions2(depends)
182 for dep in depends_dict:
183 for ver in depends_dict[dep]:
184 ver = ver.replace('(', '')
185 ver = ver.replace(')', '')
186 array.append("%s: %s %s" % (tag, dep, ver))
187 if not len(depends_dict[dep]):
188 array.append("%s: %s" % (tag, dep))
189
190 def walk_files(walkpath, target, conffiles, dirfiles):
191 # We can race against the ipk/deb backends, which create CONTROL or DEBIAN
192 # directories when packaging. We simply ignore these files, which are created
193 # in packages-split/ and not package/.
194 # We also have the odd situation where a CONTROL/DEBIAN directory can be removed
195 # in the middle of the walk; the isdir() test would then fail and the walk code
196 # would assume it's a file, hence we check for the names in files too.
197 for rootpath, dirs, files in os.walk(walkpath):
198 path = rootpath.replace(walkpath, "")
199 if path.endswith("DEBIAN") or path.endswith("CONTROL"):
200 continue
201 path = path.replace("%", "%%%%%%%%")
202 path = path.replace("[", "?")
203 path = path.replace("]", "?")
204
205 # Treat all symlinks to directories as normal files.
206 # os.walk() lists them as directories.
207 def move_to_files(dir):
208 if os.path.islink(os.path.join(rootpath, dir)):
209 files.append(dir)
210 return True
211 else:
212 return False
213 dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
214
215 # Directory handling can happen in two ways: either DIRFILES is not set at all,
216 # in which case we fall back to the older behaviour of packages owning all their
217 # directories.
218 if dirfiles is None:
219 for dir in dirs:
220 if dir == "CONTROL" or dir == "DEBIAN":
221 continue
222 dir = dir.replace("%", "%%%%%%%%")
223 dir = dir.replace("[", "?")
224 dir = dir.replace("]", "?")
225 # All packages own the directories their files are in...
226 target.append('%dir "' + path + '/' + dir + '"')
227 else:
228 # Packages own only empty directories or explicitly listed directories.
229 # This prevents overlapping security permissions.
230 if path and not files and not dirs:
231 target.append('%dir "' + path + '"')
232 elif path and path in dirfiles:
233 target.append('%dir "' + path + '"')
234
235 for file in files:
236 if file == "CONTROL" or file == "DEBIAN":
237 continue
238 file = file.replace("%", "%%%%%%%%")
239 file = file.replace("[", "?")
240 file = file.replace("]", "?")
241 if conffiles.count(path + '/' + file):
242 target.append('%config "' + path + '/' + file + '"')
243 else:
244 target.append('"' + path + '/' + file + '"')
245
246 # Prevent the prerm/postrm scripts from being run during an upgrade
247 def wrap_uninstall(scriptvar):
248 scr = scriptvar.strip()
249 if scr.startswith("#!"):
250 pos = scr.find("\n") + 1
251 else:
252 pos = 0
253 scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
254 return scr
255
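# Example, for illustration: wrap_uninstall("echo removing") returns
#   if [ "$1" = "0" ] ; then
#   echo removing
#   fi
# so the scriptlet runs only on full removal ($1 = 0), not on upgrade.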
256 def get_perfile(varname, pkg, d):
257 deps = []
258 dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
259 dependsflist = (d.getVar(dependsflist_key) or "")
260 for dfile in dependsflist.split():
261 key = "FILE" + varname + ":" + dfile + ":" + pkg
262 depends = d.getVar(key)
263 if depends:
264 deps.append(depends)
265 return " ".join(deps)
266
267 def append_description(spec_preamble, text):
268 """
269 Add the description to the spec file.
270 """
271 import textwrap
272 dedent_text = textwrap.dedent(text).strip()
273 # Bitbake saves "\n" as "\\n"
274 if '\\n' in dedent_text:
275 for t in dedent_text.split('\\n'):
276 spec_preamble.append(t.strip())
277 else:
278 spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
279
280 packages = d.getVar('PACKAGES')
281 if not packages:
282 bb.debug(1, "No packages; nothing to do")
283 return
284
285 pkgdest = d.getVar('PKGDEST')
286 if not pkgdest:
287 bb.fatal("No PKGDEST")
288
289 outspecfile = d.getVar('OUTSPECFILE')
290 if not outspecfile:
291 bb.fatal("No OUTSPECFILE")
292
293 # Construct the SPEC file...
294 srcname = d.getVar('PN')
295 localdata = bb.data.createCopy(d)
296 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
297 srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
298 srcversion = localdata.getVar('PKGV').replace('-', '+')
299 srcrelease = localdata.getVar('PKGR')
300 srcepoch = (localdata.getVar('PKGE') or "")
301 srclicense = localdata.getVar('LICENSE')
302 srcsection = localdata.getVar('SECTION')
303 srcmaintainer = localdata.getVar('MAINTAINER')
304 srchomepage = localdata.getVar('HOMEPAGE')
305 srcdescription = localdata.getVar('DESCRIPTION') or "."
306 srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
307
308 srcdepends = d.getVar('DEPENDS')
309 srcrdepends = ""
310 srcrrecommends = ""
311 srcrsuggests = ""
312 srcrprovides = ""
313 srcrreplaces = ""
314 srcrconflicts = ""
315 srcrobsoletes = ""
316
317 srcrpreinst = []
318 srcrpostinst = []
319 srcrprerm = []
320 srcrpostrm = []
321
322 spec_preamble_top = []
323 spec_preamble_bottom = []
324
325 spec_scriptlets_top = []
326 spec_scriptlets_bottom = []
327
328 spec_files_top = []
329 spec_files_bottom = []
330
331 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
332 extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
333
334 for pkg in packages.split():
335 localdata = bb.data.createCopy(d)
336
337 root = "%s/%s" % (pkgdest, pkg)
338
339 localdata.setVar('ROOT', '')
340 localdata.setVar('ROOT_%s' % pkg, root)
341 pkgname = localdata.getVar('PKG:%s' % pkg)
342 if not pkgname:
343 pkgname = pkg
344 localdata.setVar('PKG', pkgname)
345
346 localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
347
348 conffiles = get_conffiles(pkg, d)
349 dirfiles = localdata.getVar('DIRFILES')
350 if dirfiles is not None:
351 dirfiles = dirfiles.split()
352
353 splitname = pkgname
354
355 splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
356 splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
357 splitrelease = (localdata.getVar('PKGR') or "")
358 splitepoch = (localdata.getVar('PKGE') or "")
359 splitlicense = (localdata.getVar('LICENSE') or "")
360 splitsection = (localdata.getVar('SECTION') or "")
361 splitdescription = (localdata.getVar('DESCRIPTION') or ".")
362 splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
363
364 translate_vers('RDEPENDS', localdata)
365 translate_vers('RRECOMMENDS', localdata)
366 translate_vers('RSUGGESTS', localdata)
367 translate_vers('RPROVIDES', localdata)
368 translate_vers('RREPLACES', localdata)
369 translate_vers('RCONFLICTS', localdata)
370
371 # Map the dependencies into their final form
372 mapping_rename_hook(localdata)
373
374 splitrdepends = localdata.getVar('RDEPENDS') or ""
375 splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
376 splitrsuggests = localdata.getVar('RSUGGESTS') or ""
377 splitrprovides = localdata.getVar('RPROVIDES') or ""
378 splitrreplaces = localdata.getVar('RREPLACES') or ""
379 splitrconflicts = localdata.getVar('RCONFLICTS') or ""
380 splitrobsoletes = ""
381
382 splitrpreinst = localdata.getVar('pkg_preinst')
383 splitrpostinst = localdata.getVar('pkg_postinst')
384 splitrprerm = localdata.getVar('pkg_prerm')
385 splitrpostrm = localdata.getVar('pkg_postrm')
386
387
388 if not perfiledeps:
389 # Add in summary of per file dependencies
390 splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
391 splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
392
393 splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
394
395 # Gather special src/first package data
396 if srcname == splitname:
397 archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
398 bb.data.inherits_class('archiver', d)
399 if archiving and srclicense != splitlicense:
400 bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
401
402 srclicense = splitlicense
403 srcrdepends = splitrdepends
404 srcrrecommends = splitrrecommends
405 srcrsuggests = splitrsuggests
406 srcrprovides = splitrprovides
407 srcrreplaces = splitrreplaces
408 srcrconflicts = splitrconflicts
409
410 srcrpreinst = splitrpreinst
411 srcrpostinst = splitrpostinst
412 srcrprerm = splitrprerm
413 srcrpostrm = splitrpostrm
414
415 file_list = []
416 walk_files(root, file_list, conffiles, dirfiles)
417 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
418 bb.note("Not creating empty RPM package for %s" % splitname)
419 else:
420 spec_files_top.append('%files')
421 if extra_pkgdata:
422 package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
423 spec_files_top.append('%defattr(-,-,-,-)')
424 if file_list:
425 bb.note("Creating RPM package for %s" % splitname)
426 spec_files_top.extend(file_list)
427 else:
428 bb.note("Creating empty RPM package for %s" % splitname)
429 spec_files_top.append('')
430 continue
431
432 # Process subpackage data
433 spec_preamble_bottom.append('%%package -n %s' % splitname)
434 spec_preamble_bottom.append('Summary: %s' % splitsummary)
435 if srcversion != splitversion:
436 spec_preamble_bottom.append('Version: %s' % splitversion)
437 if srcrelease != splitrelease:
438 spec_preamble_bottom.append('Release: %s' % splitrelease)
439 if srcepoch != splitepoch:
440 spec_preamble_bottom.append('Epoch: %s' % splitepoch)
441 spec_preamble_bottom.append('License: %s' % splitlicense)
442 spec_preamble_bottom.append('Group: %s' % splitsection)
443
444 if srccustomtagschunk != splitcustomtagschunk:
445 spec_preamble_bottom.append(splitcustomtagschunk)
446
447 # Replaces == Obsoletes && Provides
448 robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
449 rprovides = bb.utils.explode_dep_versions2(splitrprovides)
450 rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
451 for dep in rreplaces:
452 if not dep in robsoletes:
453 robsoletes[dep] = rreplaces[dep]
454 if not dep in rprovides:
455 rprovides[dep] = rreplaces[dep]
456 splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
457 splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
458
459 print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
460 if splitrpreinst:
461 print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
462 if splitrpostinst:
463 print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
464 if splitrprerm:
465 print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
466 if splitrpostrm:
467 print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
468
469 print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
470 print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
471 print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
472 print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
473 print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
474
475 spec_preamble_bottom.append('')
476
477 spec_preamble_bottom.append('%%description -n %s' % splitname)
478 append_description(spec_preamble_bottom, splitdescription)
479
480 spec_preamble_bottom.append('')
481
482 # Now process scriptlets
483 if splitrpreinst:
484 spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
485 spec_scriptlets_bottom.append('# %s - preinst' % splitname)
486 spec_scriptlets_bottom.append(splitrpreinst)
487 spec_scriptlets_bottom.append('')
488 if splitrpostinst:
489 spec_scriptlets_bottom.append('%%post -n %s' % splitname)
490 spec_scriptlets_bottom.append('# %s - postinst' % splitname)
491 spec_scriptlets_bottom.append(splitrpostinst)
492 spec_scriptlets_bottom.append('')
493 if splitrprerm:
494 spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
495 spec_scriptlets_bottom.append('# %s - prerm' % splitname)
496 scriptvar = wrap_uninstall(splitrprerm)
497 spec_scriptlets_bottom.append(scriptvar)
498 spec_scriptlets_bottom.append('')
499 if splitrpostrm:
500 spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
501 spec_scriptlets_bottom.append('# %s - postrm' % splitname)
502 scriptvar = wrap_uninstall(splitrpostrm)
503 spec_scriptlets_bottom.append(scriptvar)
504 spec_scriptlets_bottom.append('')
505
506 # Now process files
507 file_list = []
508 walk_files(root, file_list, conffiles, dirfiles)
509 if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
510 bb.note("Not creating empty RPM package for %s" % splitname)
511 else:
512 spec_files_bottom.append('%%files -n %s' % splitname)
513 if extra_pkgdata:
514 package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
515 spec_files_bottom.append('%defattr(-,-,-,-)')
516 if file_list:
517 bb.note("Creating RPM package for %s" % splitname)
518 spec_files_bottom.extend(file_list)
519 else:
520 bb.note("Creating empty RPM package for %s" % splitname)
521 spec_files_bottom.append('')
522
523 del localdata
524
525 add_prep(d,spec_files_bottom)
526 spec_preamble_top.append('Summary: %s' % srcsummary)
527 spec_preamble_top.append('Name: %s' % srcname)
528 spec_preamble_top.append('Version: %s' % srcversion)
529 spec_preamble_top.append('Release: %s' % srcrelease)
530 if srcepoch and srcepoch.strip() != "":
531 spec_preamble_top.append('Epoch: %s' % srcepoch)
532 spec_preamble_top.append('License: %s' % srclicense)
533 spec_preamble_top.append('Group: %s' % srcsection)
534 spec_preamble_top.append('Packager: %s' % srcmaintainer)
535 if srchomepage:
536 spec_preamble_top.append('URL: %s' % srchomepage)
537 if srccustomtagschunk:
538 spec_preamble_top.append(srccustomtagschunk)
539 tail_source(d)
540
541 # Replaces == Obsoletes && Provides
542 robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
543 rprovides = bb.utils.explode_dep_versions2(srcrprovides)
544 rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
545 for dep in rreplaces:
546 if not dep in robsoletes:
547 robsoletes[dep] = rreplaces[dep]
548 if not dep in rprovides:
549 rprovides[dep] = rreplaces[dep]
550 srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
551 srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
552
553 print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
554 print_deps(srcrdepends, "Requires", spec_preamble_top, d)
555 if srcrpreinst:
556 print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
557 if srcrpostinst:
558 print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
559 if srcrprerm:
560 print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
561 if srcrpostrm:
562 print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
563
564 print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
565 print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
566 print_deps(srcrprovides, "Provides", spec_preamble_top, d)
567 print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
568 print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
569
570 spec_preamble_top.append('')
571
572 spec_preamble_top.append('%description')
573 append_description(spec_preamble_top, srcdescription)
574
575 spec_preamble_top.append('')
576
577 if srcrpreinst:
578 spec_scriptlets_top.append('%pre')
579 spec_scriptlets_top.append('# %s - preinst' % srcname)
580 spec_scriptlets_top.append(srcrpreinst)
581 spec_scriptlets_top.append('')
582 if srcrpostinst:
583 spec_scriptlets_top.append('%post')
584 spec_scriptlets_top.append('# %s - postinst' % srcname)
585 spec_scriptlets_top.append(srcrpostinst)
586 spec_scriptlets_top.append('')
587 if srcrprerm:
588 spec_scriptlets_top.append('%preun')
589 spec_scriptlets_top.append('# %s - prerm' % srcname)
590 scriptvar = wrap_uninstall(srcrprerm)
591 spec_scriptlets_top.append(scriptvar)
592 spec_scriptlets_top.append('')
593 if srcrpostrm:
594 spec_scriptlets_top.append('%postun')
595 spec_scriptlets_top.append('# %s - postrm' % srcname)
596 scriptvar = wrap_uninstall(srcrpostrm)
597 spec_scriptlets_top.append(scriptvar)
598 spec_scriptlets_top.append('')
599
600 # Write the SPEC file
601 specfile = open(outspecfile, 'w')
602
603 # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
604 # of the generated spec file
605 external_preamble = d.getVar("RPMSPEC_PREAMBLE")
606 if external_preamble:
607 specfile.write(external_preamble + "\n")
608
609 for line in spec_preamble_top:
610 specfile.write(line + "\n")
611
612 for line in spec_preamble_bottom:
613 specfile.write(line + "\n")
614
615 for line in spec_scriptlets_top:
616 specfile.write(line + "\n")
617
618 for line in spec_scriptlets_bottom:
619 specfile.write(line + "\n")
620
621 for line in spec_files_top:
622 specfile.write(line + "\n")
623
624 for line in spec_files_bottom:
625 specfile.write(line + "\n")
626
627 specfile.close()
628}
629# Otherwise allarch packages may change depending on override configuration
630write_specfile[vardepsexclude] = "OVERRIDES"
631
632# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
633RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
634write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
635
636python do_package_rpm () {
637 workdir = d.getVar('WORKDIR')
638 tmpdir = d.getVar('TMPDIR')
639 pkgd = d.getVar('PKGD')
640 pkgdest = d.getVar('PKGDEST')
641 if not workdir or not pkgd or not tmpdir:
642 bb.error("Variables incorrectly set, unable to package")
643 return
644
645 packages = d.getVar('PACKAGES')
646 if not packages:
647 bb.debug(1, "No packages; nothing to do")
648 return
649
650 # Construct the spec file...
651 # If the spec file already exists and has not been stored in
652 # pseudo's files.db, it may cause the rpmbuild of the src.rpm to fail,
653 # so remove it before running rpmbuild.
654 srcname = d.getVar('PN')
655 outspecfile = workdir + "/" + srcname + ".spec"
656 if os.path.isfile(outspecfile):
657 os.remove(outspecfile)
658 d.setVar('OUTSPECFILE', outspecfile)
659 bb.build.exec_func('write_specfile', d)
660
661 perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
662 if perfiledeps:
663 outdepends, outprovides = write_rpm_perfiledata(srcname, d)
664
665 # Setup the rpmbuild arguments...
666 rpmbuild = d.getVar('RPMBUILD')
667 targetsys = d.getVar('TARGET_SYS')
668 targetvendor = d.getVar('HOST_VENDOR')
669
670 # Too many places in dnf stack assume that arch-independent packages are "noarch".
671 # Let's not fight against this.
672 package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
673 if package_arch == "all":
674 package_arch = "noarch"
675
676 sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
677 d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
678 pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
679 d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
680 bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
681 pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
682 bb.utils.mkdirhier(pkgwritedir)
683 os.chmod(pkgwritedir, 0o755)
684
685 cmd = rpmbuild
686 cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
687 cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
688 cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
689 cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
690 cmd = cmd + " --define '_use_internal_dependency_generator 0'"
691 cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
692 cmd = cmd + " --define '_build_id_links none'"
693 cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
694 cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
695 cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
696 cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
697 cmd = cmd + " --define '_buildhost reproducible'"
698 cmd = cmd + " --define '__font_provides %{nil}'"
699 if perfiledeps:
700 cmd = cmd + " --define '__find_requires " + outdepends + "'"
701 cmd = cmd + " --define '__find_provides " + outprovides + "'"
702 else:
703 cmd = cmd + " --define '__find_requires %{nil}'"
704 cmd = cmd + " --define '__find_provides %{nil}'"
705 cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
706 cmd = cmd + " --define 'debug_package %{nil}'"
707 cmd = cmd + " --define '_tmppath " + workdir + "'"
708 if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
709 cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
710 cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
711 cmdsrpm = cmdsrpm + " -bs " + outspecfile
712 # Build the .src.rpm
713 d.setVar('SBUILDSPEC', cmdsrpm + "\n")
714 d.setVarFlag('SBUILDSPEC', 'func', '1')
715 bb.build.exec_func('SBUILDSPEC', d)
716 cmd = cmd + " -bb " + outspecfile
717
718 # rpm 4 creates various empty directories in _topdir, let's clean them up
719 cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
720
721 # Build the rpm package!
722 d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
723 d.setVarFlag('BUILDSPEC', 'func', '1')
724 bb.build.exec_func('BUILDSPEC', d)
725
726 if d.getVar('RPM_SIGN_PACKAGES') == '1':
727 bb.build.exec_func("sign_rpm", d)
728}
729
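# For reference, the assembled command looks roughly like this (illustrative
# values; HOST_VENDOR assumed to be "-oe"):
#   rpmbuild --noclean --nodeps --short-circuit --target noarch-oe-linux \
#       --buildroot ${PKGD} --define '_topdir ${WORKDIR}' ... -bb ${PN}.spec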
730python () {
731 if d.getVar('PACKAGES') != '':
732 deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
733 d.appendVarFlag('do_package_write_rpm', 'depends', deps)
734 d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
735}
736
737SSTATETASKS += "do_package_write_rpm"
738do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
739do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
740# Take a shared lock, we can write multiple packages at the same time...
741# but we need to stop the rootfs/solver from running while we do...
742do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
743
744python do_package_write_rpm_setscene () {
745 sstate_setscene(d)
746}
747addtask do_package_write_rpm_setscene
748
749python do_package_write_rpm () {
750 bb.build.exec_func("read_subpackage_metadata", d)
751 bb.build.exec_func("do_package_rpm", d)
752}
753
754do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
755do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
756do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
757addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
758do_build[rdeptask] += "do_package_write_rpm"
759
760PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
761PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
diff --git a/meta/classes-global/package_tar.bbclass b/meta/classes-global/package_tar.bbclass
new file mode 100644
index 0000000000..de995f9747
--- /dev/null
+++ b/meta/classes-global/package_tar.bbclass
@@ -0,0 +1,77 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7inherit package
8
9IMAGE_PKGTYPE ?= "tar"
10
11python do_package_tar () {
12 import subprocess
13
14 oldcwd = os.getcwd()
15
16 workdir = d.getVar('WORKDIR')
17 if not workdir:
18 bb.error("WORKDIR not defined, unable to package")
19 return
20
21 outdir = d.getVar('DEPLOY_DIR_TAR')
22 if not outdir:
23 bb.error("DEPLOY_DIR_TAR not defined, unable to package")
24 return
25
26 dvar = d.getVar('D')
27 if not dvar:
28 bb.error("D not defined, unable to package")
29 return
30
31 packages = d.getVar('PACKAGES')
32 if not packages:
33 bb.debug(1, "PACKAGES not defined, nothing to package")
34 return
35
36 pkgdest = d.getVar('PKGDEST')
37
38 bb.utils.mkdirhier(outdir)
39 bb.utils.mkdirhier(dvar)
40
41 for pkg in packages.split():
42 localdata = bb.data.createCopy(d)
43 root = "%s/%s" % (pkgdest, pkg)
44
45 overrides = localdata.getVar('OVERRIDES', False)
46 localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
47
48 bb.utils.mkdirhier(root)
49
50 tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
51 os.chdir(root)
52 dlist = os.listdir(root)
53 if not dlist:
54 bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
55 continue
56 args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
57 ret = subprocess.call(args + [tarfn] + dlist)
58 if ret != 0:
59 bb.error("Creation of tar %s failed." % tarfn)
60
61 os.chdir(oldcwd)
62}
63
64python () {
65 if d.getVar('PACKAGES') != '':
66 deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
67 d.appendVarFlag('do_package_write_tar', 'depends', deps)
68 d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
69}
70
71
72python do_package_write_tar () {
73 bb.build.exec_func("read_subpackage_metadata", d)
74 bb.build.exec_func("do_package_tar", d)
75}
76do_package_write_tar[dirs] = "${D}"
77addtask package_write_tar before do_build after do_packagedata do_package
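The loop above reduces to one tar invocation per non-empty package. A rough
standalone sketch, with hypothetical names and paths ("foo", "1.0", "r0")
standing in for real datastore values:

    import os, subprocess

    pkg, pkgv, pkgr = "foo", "1.0", "r0"     # hypothetical package identity
    root = "/path/to/pkgdest/foo"            # hypothetical ${PKGDEST}/foo
    tarfn = "/path/to/deploy/tar/%s-%s-%s.tar.gz" % (pkg, pkgv, pkgr)
    os.chdir(root)
    dlist = os.listdir(root)                 # empty packages are skipped above
    args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
    subprocess.call(args + [tarfn] + dlist)  # writes foo-1.0-r0.tar.gz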
diff --git a/meta/classes-global/packagedata.bbclass b/meta/classes-global/packagedata.bbclass
new file mode 100644
index 0000000000..9f72c01d77
--- /dev/null
+++ b/meta/classes-global/packagedata.bbclass
@@ -0,0 +1,40 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7python read_subpackage_metadata () {
8 import oe.packagedata
9
10 vars = {
11 "PN" : d.getVar('PN'),
12 "PE" : d.getVar('PE'),
13 "PV" : d.getVar('PV'),
14 "PR" : d.getVar('PR'),
15 }
16
17 data = oe.packagedata.read_pkgdata(vars["PN"], d)
18
19 for key in data.keys():
20 d.setVar(key, data[key])
21
22 for pkg in d.getVar('PACKAGES').split():
23 sdata = oe.packagedata.read_subpkgdata(pkg, d)
24 for key in sdata.keys():
25 if key in vars:
26 if sdata[key] != vars[key]:
27 if key == "PN":
28 bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
29 bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
30 continue
31 #
32 # If we set unsuffixed variables here there is a chance they could clobber override versions
33 # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
34 # We therefore don't clobber for the unsuffixed variable versions
35 #
36 if key.endswith(":" + pkg):
37 d.setVar(key, sdata[key])
38 else:
39 d.setVar(key, sdata[key], parsing=True)
40}
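A short sketch of the clobbering concern described in the comment above, with
hypothetical pkgdata values; setting the unsuffixed variable with parsing=True
records it like a parse-time assignment, so the per-package override still
wins once OVERRIDES contains the package name:

    # hypothetical values, not real pkgdata
    d.setVar("DESCRIPTION:foo", "foo-specific description")
    d.setVar("DESCRIPTION", "generic description", parsing=True)
    # with "foo" in OVERRIDES, d.getVar("DESCRIPTION") still returns the
    # foo-specific value; a plain setVar("DESCRIPTION", ...) could clobber it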
diff --git a/meta/classes-global/patch.bbclass b/meta/classes-global/patch.bbclass
new file mode 100644
index 0000000000..e3157c7b18
--- /dev/null
+++ b/meta/classes-global/patch.bbclass
@@ -0,0 +1,171 @@
1# Copyright (C) 2006 OpenedHand LTD
2#
3# SPDX-License-Identifier: MIT
4
5# Point to an empty file so any user's custom settings don't break things
6QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
7
8PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
9
10# There is a bug in patch 2.7.3 and earlier where index lines
11# in patches can change file modes when they shouldn't:
12# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
13# This leaks into debug sources in particular. Add the dependency
14# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
15PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"
16
17PATCH_GIT_USER_NAME ?= "OpenEmbedded"
18PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
19
20inherit terminal
21
22python () {
23 if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
24 extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
25 try:
26 extratasks.remove('do_unpack')
27 except ValueError:
28 # For some recipes do_unpack doesn't exist, ignore it
29 pass
30
31 d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
32 for task in extratasks:
33 d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
34}
35
36python patch_task_patch_prefunc() {
37 # Prefunc for do_patch
38 srcsubdir = d.getVar('S')
39
40 workdir = os.path.abspath(d.getVar('WORKDIR'))
41 testsrcdir = os.path.abspath(srcsubdir)
42 if (testsrcdir + os.sep).startswith(workdir + os.sep):
43 # Double-check that either workdir or S or some directory in-between is a git repository
44 found = False
45 while testsrcdir != workdir:
46 if os.path.exists(os.path.join(testsrcdir, '.git')):
47 found = True
48 break
49 if testsrcdir == workdir:
50 break
51 testsrcdir = os.path.dirname(testsrcdir)
52 if not found:
53 bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
54
55 patchdir = os.path.join(srcsubdir, 'patches')
56 if os.path.exists(patchdir):
57 if os.listdir(patchdir):
58 d.setVar('PATCH_HAS_PATCHES_DIR', '1')
59 else:
60 os.rmdir(patchdir)
61}
62
63python patch_task_postfunc() {
64 # Postfunc for do_patch and the other task functions between do_unpack and do_patch
65 import oe.patch
66 import shutil
67 func = d.getVar('BB_RUNTASK')
68 srcsubdir = d.getVar('S')
69
70 if os.path.exists(srcsubdir):
71 if func == 'do_patch':
72 haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
73 patchdir = os.path.join(srcsubdir, 'patches')
74 if os.path.exists(patchdir):
75 shutil.rmtree(patchdir)
76 if haspatches:
77 stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
78 if stdout:
79 bb.process.run('git checkout patches', cwd=srcsubdir)
80 stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
81 if stdout:
82 useroptions = []
83 oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
84 bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(useroptions), func, oe.patch.GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
85}
86
87def src_patches(d, all=False, expand=True):
88 import oe.patch
89 return oe.patch.src_patches(d, all, expand)
90
91def should_apply(parm, d):
92 """Determine if we should apply the given patch"""
93 import oe.patch
94 return oe.patch.should_apply(parm, d)
95
96should_apply[vardepsexclude] = "DATE SRCDATE"
97
98python patch_do_patch() {
99 import oe.patch
100
101 patchsetmap = {
102 "patch": oe.patch.PatchTree,
103 "quilt": oe.patch.QuiltTree,
104 "git": oe.patch.GitApplyTree,
105 }
106
107 cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
108
109 resolvermap = {
110 "noop": oe.patch.NOOPResolver,
111 "user": oe.patch.UserResolver,
112 }
113
114 rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
115
116 classes = {}
117
118 s = d.getVar('S')
119
120 os.putenv('PATH', d.getVar('PATH'))
121
122 # We must use one TMPDIR per process so that the "patch" processes
123 # don't generate the same temp file name.
124
125 import tempfile
126 process_tmpdir = tempfile.mkdtemp()
127 os.environ['TMPDIR'] = process_tmpdir
128
129 for patch in src_patches(d):
130 _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
131
132 if "patchdir" in parm:
133 patchdir = parm["patchdir"]
134 if not os.path.isabs(patchdir):
135 patchdir = os.path.join(s, patchdir)
136 if not os.path.isdir(patchdir):
137 bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
138 (patchdir, parm["patchdir"], parm['patchname']))
139 else:
140 patchdir = s
141
142 if patchdir not in classes:
143 patchset = cls(patchdir, d)
144 resolver = rcls(patchset, oe_terminal)
145 classes[patchdir] = (patchset, resolver)
146 patchset.Clean()
147 else:
148 patchset, resolver = classes[patchdir]
149
150 bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
151 try:
152 patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
153 except Exception as exc:
154 bb.utils.remove(process_tmpdir, True)
155 bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
156 try:
157 resolver.Resolve()
158 except bb.BBHandledException as e:
159 bb.utils.remove(process_tmpdir, True)
160 bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))
161
162 bb.utils.remove(process_tmpdir, True)
163 del os.environ['TMPDIR']
164}
165patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
166
167addtask patch after do_unpack
168do_patch[dirs] = "${WORKDIR}"
169do_patch[depends] = "${PATCHDEPENDENCY}"
170
171EXPORT_FUNCTIONS do_patch
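The class keys off a handful of configuration variables. A minimal,
hypothetical local.conf fragment that switches patch application to git and
enables the per-task commit machinery wired up above:

    PATCHTOOL = "git"
    PATCH_COMMIT_FUNCTIONS = "1"
    # optional: fail on conflicts instead of opening an interactive terminal
    PATCHRESOLVE = "noop"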
diff --git a/meta/classes-global/sanity.bbclass b/meta/classes-global/sanity.bbclass
new file mode 100644
index 0000000000..4104694478
--- /dev/null
+++ b/meta/classes-global/sanity.bbclass
@@ -0,0 +1,1028 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7#
8# Sanity check the user's setup for common misconfigurations
9#
10
11SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
12 gzip gawk chrpath wget cpio perl file which"
13
14def bblayers_conf_file(d):
15 return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
16
17def sanity_conf_read(fn):
18 with open(fn, 'r') as f:
19 lines = f.readlines()
20 return lines
21
22def sanity_conf_find_line(pattern, lines):
23 import re
24 return next(((index, line)
25 for index, line in enumerate(lines)
26 if re.search(pattern, line)), (None, None))
27
28def sanity_conf_update(fn, lines, version_var_name, new_version):
29 index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
30 lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
31 with open(fn, "w") as f:
32 f.write(''.join(lines))
33
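# For illustration, the three helpers above combine as follows (file name and
# version are hypothetical):
#
#   lines = sanity_conf_read("conf/bblayers.conf")
#   index, line = sanity_conf_find_line(r"^LCONF_VERSION", lines)
#   # (index, line) is (None, None) when nothing matches; sanity_conf_update
#   # assumes a matching line exists
#   sanity_conf_update("conf/bblayers.conf", lines, "LCONF_VERSION", 7)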
34# Functions added to this variable MUST throw a NotImplementedError exception unless
35# they successfully changed the config version in the config file. Exceptions
36# are used since exec_func doesn't handle return values.
37BBLAYERS_CONF_UPDATE_FUNCS += " \
38 conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
39 conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
40 conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
41"
42
43SANITY_DIFF_TOOL ?= "meld"
44
45SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
46python oecore_update_localconf() {
47 # Check we are using a valid local.conf
48 current_conf = d.getVar('CONF_VERSION')
49 conf_version = d.getVar('LOCALCONF_VERSION')
50
51 failmsg = """Your version of local.conf was generated from an older/newer version of
52local.conf.sample and there have been updates made to this file. Please compare the two
53files and merge any changes before continuing.
54
55Matching the version numbers will remove this message.
56
57\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\"
58
59is a good way to visualise the changes."""
60 failmsg = d.expand(failmsg)
61
62 raise NotImplementedError(failmsg)
63}
64
65SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
66python oecore_update_siteconf() {
67 # If we have a site.conf, check it's valid
68 current_sconf = d.getVar('SCONF_VERSION')
69 sconf_version = d.getVar('SITE_CONF_VERSION')
70
71 failmsg = """Your version of site.conf was generated from an older version of
72site.conf.sample and there have been updates made to this file. Please compare the two
73files and merge any changes before continuing.
74
75Matching the version numbers will remove this message.
76
77\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\"
78
79is a good way to visualise the changes."""
80 failmsg = d.expand(failmsg)
81
82 raise NotImplementedError(failmsg)
83}
84
85SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
86python oecore_update_bblayers() {
87 # bblayers.conf is out of date, so see if we can resolve that
88
89 current_lconf = int(d.getVar('LCONF_VERSION'))
90 lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
91
92 failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
93Please compare your file against bblayers.conf.sample and merge any changes before continuing.
94"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}"
95
96is a good way to visualise the changes."""
97 failmsg = d.expand(failmsg)
98
99 if not current_lconf:
100 raise NotImplementedError(failmsg)
101
102 lines = []
103
104 if current_lconf < 4:
105 raise NotImplementedError(failmsg)
106
107 bblayers_fn = bblayers_conf_file(d)
108 lines = sanity_conf_read(bblayers_fn)
109
110 if current_lconf == 4 and lconf_version > 4:
111 topdir_var = '$' + '{TOPDIR}'
112 index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
113 if bbpath_line:
114 start = bbpath_line.find('"')
115 if start != -1 and (len(bbpath_line) != (start + 1)):
116 if bbpath_line[start + 1] == '"':
117 lines[index] = (bbpath_line[:start + 1] +
118 topdir_var + bbpath_line[start + 1:])
119 else:
120 if topdir_var not in bbpath_line:
121 lines[index] = (bbpath_line[:start + 1] +
122 topdir_var + ':' + bbpath_line[start + 1:])
123 else:
124 raise NotImplementedError(failmsg)
125 else:
126 index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
127 if bbfiles_line:
128 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
129 else:
130 raise NotImplementedError(failmsg)
131
132 current_lconf += 1
133 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
134 bb.note("Your conf/bblayers.conf has been automatically updated.")
135 return
136
137 elif current_lconf == 5 and lconf_version > 5:
138 # Null update, to avoid issues with people switching between poky and other distros
139 current_lconf = 6
140 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
141 bb.note("Your conf/bblayers.conf has been automatically updated.")
142 return
143
144
145
146 elif current_lconf == 6 and lconf_version > 6:
147 # Handle rename of meta-yocto -> meta-poky
148 # This marks the start of separate version numbers but code is needed in OE-Core
149 # for the migration, one last time.
150 layers = d.getVar('BBLAYERS').split()
151 layers = [ os.path.basename(path) for path in layers ]
152 if 'meta-yocto' in layers:
153 found = False
154 while True:
155 index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
156 if meta_yocto_line:
157 lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
158 found = True
159 else:
160 break
161 if not found:
162 raise NotImplementedError(failmsg)
163 index, meta_yocto_line = sanity_conf_find_line('LCONF_VERSION.*\n', lines)
164 if meta_yocto_line:
165 lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
166 else:
167 raise NotImplementedError(failmsg)
168 with open(bblayers_fn, "w") as f:
169 f.write(''.join(lines))
170 bb.note("Your conf/bblayers.conf has been automatically updated.")
171 return
172 current_lconf += 1
173 sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
174 bb.note("Your conf/bblayers.conf has been automatically updated.")
175 return
176
177 raise NotImplementedError(failmsg)
178}
179
180def raise_sanity_error(msg, d, network_error=False):
181 if d.getVar("SANITY_USE_EVENTS") == "1":
182 try:
183 bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
184 except TypeError:
185 bb.event.fire(bb.event.SanityCheckFailed(msg), d)
186 return
187
188 bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
189 Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
190 Following is the list of potential problems / advisories:
191
192 %s""" % msg)
193
194# Check a single tune for validity.
195def check_toolchain_tune(data, tune, multilib):
196 tune_errors = []
197 if not tune:
198 return "No tuning found for %s multilib." % multilib
199 localdata = bb.data.createCopy(data)
200 if multilib != "default":
201 # Apply the overrides so we can look at the details.
202 overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
203 localdata.setVar("OVERRIDES", overrides)
204 bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
205 features = (localdata.getVar("TUNE_FEATURES:tune-%s" % tune) or "").split()
206 if not features:
207 return "Tuning '%s' has no defined features, and cannot be used." % tune
208 valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
209 conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
210 # [doc] is the documentation for the variable, not a real feature
211 if 'doc' in valid_tunes:
212 del valid_tunes['doc']
213 if 'doc' in conflicts:
214 del conflicts['doc']
215 for feature in features:
216 if feature in conflicts:
217 for conflict in conflicts[feature].split():
218 if conflict in features:
219 tune_errors.append("Feature '%s' conflicts with '%s'." %
220 (feature, conflict))
221 if feature in valid_tunes:
222 bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
223 else:
224 tune_errors.append("Feature '%s' is not defined." % feature)
225 if tune_errors:
226 return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
227
228def check_toolchain(data):
229 tune_error_set = []
230 deftune = data.getVar("DEFAULTTUNE")
231 tune_errors = check_toolchain_tune(data, deftune, 'default')
232 if tune_errors:
233 tune_error_set.append(tune_errors)
234
235 multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
236 global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
237
238 if multilibs:
239 seen_libs = []
240 seen_tunes = []
241 for lib in multilibs:
242 if lib in seen_libs:
243 tune_error_set.append("The multilib '%s' appears more than once." % lib)
244 else:
245 seen_libs.append(lib)
246 if lib not in global_multilibs:
247 tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
248 tune = data.getVar("DEFAULTTUNE:virtclass-multilib-%s" % lib)
249 if tune in seen_tunes:
250 tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
251 else:
252 seen_tunes.append(tune)
253 if tune == deftune:
254 tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
255 else:
256 tune_errors = check_toolchain_tune(data, tune, lib)
257 if tune_errors:
258 tune_error_set.append(tune_errors)
259 if tune_error_set:
260 return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
261
262 return ""
263
264def check_conf_exists(fn, data):
265 bbpath = []
266 fn = data.expand(fn)
267 vbbpath = data.getVar("BBPATH", False)
268 if vbbpath:
269 bbpath += vbbpath.split(":")
270 for p in bbpath:
271 currname = os.path.join(data.expand(p), fn)
272 if os.access(currname, os.R_OK):
273 return True
274 return False
275
276def check_create_long_filename(filepath, pathname):
277 import string, random
278 testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
279 try:
280 if not os.path.exists(filepath):
281 bb.utils.mkdirhier(filepath)
282 f = open(testfile, "w")
283 f.close()
284 os.remove(testfile)
285 except IOError as e:
286 import errno
287 err, strerror = e.args
288 if err == errno.ENAMETOOLONG:
289 return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
290 else:
291 return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
292 except OSError as e:
293 errno, strerror = e.args
294 return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
295 return ""
296
297def check_path_length(filepath, pathname, limit):
298 if len(filepath) > limit:
299 return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
300 return ""
301
302def get_filesystem_id(path):
303 import subprocess
304 try:
305 return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
306 except subprocess.CalledProcessError:
307 bb.warn("Can't get filesystem id of: %s" % path)
308 return None
309
310# Check that the path isn't located on nfs.
311def check_not_nfs(path, name):
312 # The NFS filesystem id is 6969
313 if get_filesystem_id(path) == "6969":
314 return "The %s: %s can't be located on nfs.\n" % (name, path)
315 return ""
316
317# Check that the path is on a case-sensitive file system
318def check_case_sensitive(path, name):
319 import tempfile
320 with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
321 if os.path.exists(tmp_file.name.lower()):
322 return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
323 return ""
324
325# Check that path isn't a broken symlink
326def check_symlink(lnk, data):
327 if os.path.islink(lnk) and not os.path.exists(lnk):
328 raise_sanity_error("%s is a broken symlink." % lnk, data)
329
330def check_connectivity(d):
331 # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable
332 # using the same syntax as for SRC_URI. If the variable is not set
333 # the check is skipped
334 test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
335 retval = ""
336
337 bbn = d.getVar('BB_NO_NETWORK')
338 if bbn not in (None, '0', '1'):
339 return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
340
341 # Only check connectivity if network enabled and the
342 # CONNECTIVITY_CHECK_URIS are set
343 network_enabled = not (bbn == '1')
344 check_enabled = len(test_uris)
345 if check_enabled and network_enabled:
346 # Take a copy of the data store and unset MIRRORS and PREMIRRORS
347 data = bb.data.createCopy(d)
348 data.delVar('PREMIRRORS')
349 data.delVar('MIRRORS')
350 try:
351 fetcher = bb.fetch2.Fetch(test_uris, data)
352 fetcher.checkstatus()
353 except Exception as err:
354 # Allow the message to be configured so that users can be
355 # pointed to a support mechanism.
356 msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
357 if len(msg) == 0:
358 msg = "%s.\n" % err
359 msg += " Please ensure your host's network is configured correctly.\n"
360 msg += " If your ISP or network is blocking the above URL,\n"
361 msg += " try with another domain name, for example by setting:\n"
362 msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\""
363 msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
364 msg += " access if all required sources are on local disk.\n"
365 retval = msg
366
367 return retval
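# For illustration, a hypothetical local.conf fragment enabling the check:
#
#   CONNECTIVITY_CHECK_URIS = "https://www.example.com/"
#   CONNECTIVITY_CHECK_MSG = "Network access failed; check your proxy settings."
#
# The URI and message here are placeholders; any SRC_URI-style URI is accepted.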
368
369def check_supported_distro(sanity_data):
370 from fnmatch import fnmatch
371
372 tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
373 if not tested_distros:
374 return
375
376 try:
377 distro = oe.lsb.distro_identifier()
378 except Exception:
379 distro = None
380
381 if not distro:
382 bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
383
384 for supported in [x.strip() for x in tested_distros.split('\\n')]:
385 if distro and fnmatch(distro, supported):
386 return
387
388 bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
389
390# Checks we should only make if MACHINE is set correctly
391def check_sanity_validmachine(sanity_data):
392 messages = ""
393
394 # Check TUNE_ARCH is set
395 if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
396 messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
397
398 # Check TARGET_OS is set
399 if sanity_data.getVar('TARGET_OS') == 'INVALID':
400 messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
401
402 # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
403 pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
404 tunepkg = sanity_data.getVar('TUNE_PKGARCH')
405 defaulttune = sanity_data.getVar('DEFAULTTUNE')
406 tunefound = False
407 seen = {}
408 dups = []
409
410 for pa in pkgarchs.split():
411 if seen.get(pa, 0) == 1:
412 dups.append(pa)
413 else:
414 seen[pa] = 1
415 if pa == tunepkg:
416 tunefound = True
417
418 if dups:
419 messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s\n" % " ".join(dups)
420
421 if not tunefound:
422 messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s).\n" % (pkgarchs, defaulttune, tunepkg)
423
424 return messages
425
426# Patch before 2.7 can't handle all the features in git-style diffs. Some
427# patches may incorrectly apply, and others won't apply at all.
428def check_patch_version(sanity_data):
429 import re, subprocess
430
431 try:
432 result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
433 version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
434 if bb.utils.vercmp_string_op(version, "2.7", "<"):
435 return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
436 else:
437 return None
438 except subprocess.CalledProcessError as e:
439 return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
440
441# Glibc needs make 4.0 or later, we may as well match at this point
442def check_make_version(sanity_data):
443 import subprocess
444
445 try:
446 result = subprocess.check_output(['make', '--version'], stderr=subprocess.STDOUT).decode('utf-8')
447 except subprocess.CalledProcessError as e:
448 return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
449 version = result.split()[2]
450 if bb.utils.vercmp_string_op(version, "4.0", "<"):
451 return "Please install a make version of 4.0 or later.\n"
452
453 if bb.utils.vercmp_string_op(version, "4.2.1", "=="):
454 distro = oe.lsb.distro_identifier()
455 if "ubuntu" in distro or "debian" in distro or "linuxmint" in distro:
456 return None
457 return "make version 4.2.1 is known to have issues on Centos/OpenSUSE and other non-Ubuntu systems. Please use a buildtools-make-tarball or a newer version of make.\n"
458 return None
459
460
461# Check if we're running on WSL (Windows Subsystem for Linux).
462# WSLv1 is known not to work but WSLv2 should work properly as
463# long as the VHDX file is optimized often, let the user know
464# upfront.
465# More information on installing WSLv2 at:
466# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
467def check_wsl(d):
468 with open("/proc/version", "r") as f:
469 verdata = f.readlines()
470 for l in verdata:
471 if "Microsoft" in l:
472 return "OpenEmbedded doesn't work under WSLv1, please upgrade to WSLv2 if you want to run builds on Windows"
473 elif "microsoft" in l:
474 bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
475 return None
476
477# Require at least gcc version 7.5.
478#
479# This can be fixed on CentOS-7 with devtoolset-6+
480# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
481#
482# A less invasive fix is with scripts/install-buildtools (or with user
483# built buildtools-extended-tarball)
484#
485def check_gcc_version(sanity_data):
486 import subprocess
487
488 build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
489 if build_cc.strip() == "gcc":
490 if bb.utils.vercmp_string_op(version, "7.5", "<"):
491 return "Your version of gcc is older than 7.5 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
492 return None
493
494# Tar version 1.24 and onwards handle overwriting symlinks correctly
495# but earlier versions do not; this needs to work properly for sstate
496# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
497def check_tar_version(sanity_data):
498 import subprocess
499 try:
500 result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
501 except subprocess.CalledProcessError as e:
502 return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
503 version = result.split()[3]
504 if bb.utils.vercmp_string_op(version, "1.28", "<"):
505 return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
506 return None
507
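# For reference, bb.utils.vercmp_string_op() compares dotted version strings;
# with illustrative values:
#
#   bb.utils.vercmp_string_op("1.27.1", "1.28", "<")  # True  -> too old
#   bb.utils.vercmp_string_op("1.34", "1.28", "<")    # False -> acceptable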
508# We use git parameters and functionality only found in 1.7.8 or later
509# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
510# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
511def check_git_version(sanity_data):
512 import subprocess
513 try:
514 result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
515 except subprocess.CalledProcessError as e:
516 return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
517 version = result.split()[2]
518 if bb.utils.vercmp_string_op(version, "1.8.3.1", "<"):
519 return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
520 return None
521
522# Check the required perl modules which may not be installed by default
523def check_perl_modules(sanity_data):
524 import subprocess
525 ret = ""
526 modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
527 errresult = ''
528 for m in modules:
529 try:
530 subprocess.check_output(["perl", "-e", "use %s" % m])
531 except subprocess.CalledProcessError as e:
532 errresult += bytes.decode(e.output)
533 ret += "%s " % m
534 if ret:
535 return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
536 return None
537
538def sanity_check_conffiles(d):
539 funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
540 for func in funcs:
541 conffile, current_version, required_version, func = func.split(":")
542 if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
543 d.getVar(current_version) != d.getVar(required_version):
544 try:
545 bb.build.exec_func(func, d)
546 except NotImplementedError as e:
547 bb.fatal(str(e))
548 d.setVar("BB_INVALIDCONF", True)
549
550def drop_v14_cross_builds(d):
551 import glob
552 indexes = glob.glob(d.expand("${SSTATE_MANIFESTS}/index-${BUILD_ARCH}_*"))
553 for i in indexes:
554 with open(i, "r") as f:
555 lines = f.readlines()
556 for l in reversed(lines):
557 try:
558 (stamp, manifest, workdir) = l.split()
559 except ValueError:
560 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
561 for m in glob.glob(manifest + ".*"):
562 if m.endswith(".postrm"):
563 continue
564 sstate_clean_manifest(m, d)
565 bb.utils.remove(stamp + "*")
566 bb.utils.remove(workdir, recurse = True)
567
568def sanity_handle_abichanges(status, d):
569 #
570 # Check the 'ABI' of TMPDIR
571 #
572 import subprocess
573
574 current_abi = d.getVar('OELAYOUT_ABI')
575 abifile = d.getVar('SANITY_ABIFILE')
576 if os.path.exists(abifile):
577 with open(abifile, "r") as f:
578 abi = f.read().strip()
579 if not abi.isdigit():
580 with open(abifile, "w") as f:
581 f.write(current_abi)
582 elif int(abi) <= 11 and current_abi == "12":
583 status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
584 elif int(abi) <= 13 and current_abi == "14":
585 status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
586 elif int(abi) == 14 and current_abi == "15":
587 drop_v14_cross_builds(d)
588 with open(abifile, "w") as f:
589 f.write(current_abi)
590 elif (abi != current_abi):
591 # Code to convert from one ABI to another could go here if possible.
592 status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
593 else:
594 with open(abifile, "w") as f:
595 f.write(current_abi)
596
597def check_sanity_sstate_dir_change(sstate_dir, data):
598 # Sanity checks to be done when the value of SSTATE_DIR changes
599
600 # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
601 testmsg = ""
602 if sstate_dir != "":
603 testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
604 # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
605 try:
606 err = testmsg.split(': ')[1].strip()
607 if err == "Permission denied.":
608 testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
609 except IndexError:
610 pass
611 return testmsg
612
613def check_sanity_version_change(status, d):
614 # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
615 # In other words, these tests run once in a given build directory and then
616 # never again until the sanity version or host distribution id/version changes.
617
618 # Check the python install is complete. Examples that are often removed in
619 # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
620 # requires distutils.sysconfig.
621 try:
622 import xml.parsers.expat
623 import distutils.sysconfig
624 except ImportError as e:
625 status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
626
627 status.addresult(check_gcc_version(d))
628 status.addresult(check_make_version(d))
629 status.addresult(check_patch_version(d))
630 status.addresult(check_tar_version(d))
631 status.addresult(check_git_version(d))
632 status.addresult(check_perl_modules(d))
633 status.addresult(check_wsl(d))
634
635 missing = ""
636
637 if not check_app_exists("${MAKE}", d):
638 missing = missing + "GNU make,"
639
640 if not check_app_exists('${BUILD_CC}', d):
641 missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC")
642
643 if not check_app_exists('${BUILD_CXX}', d):
644 missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX")
645
646 required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
647
648 for util in required_utilities.split():
649 if not check_app_exists(util, d):
650 missing = missing + "%s," % util
651
652 if missing:
653 missing = missing.rstrip(',')
654 status.addresult("Please install the following missing utilities: %s\n" % missing)
655
656 assume_provided = d.getVar('ASSUME_PROVIDED').split()
657 # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
658 if "diffstat-native" not in assume_provided:
659 status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
660
661 # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
662 import stat
663 tmpdir = d.getVar('TMPDIR')
664 status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
665 tmpdirmode = os.stat(tmpdir).st_mode
666 if (tmpdirmode & stat.S_ISGID):
667 status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
668 if (tmpdirmode & stat.S_ISUID):
669 status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
670
671 # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
672 pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
673 workdir = d.getVar('WORKDIR', expand=True)
674 for i in pseudoignorepaths:
675 if i and workdir.startswith(i):
676 status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
677
678 # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
679 pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
680 pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
681 pseudocontroldir = d.expand(pseudo_control_dir).split(",")
682 for i in pseudoignorepaths:
683 for j in pseudocontroldir:
684 if i and j:
685 if j.startswith(i):
686 status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
687
688 # Some third-party software apparently relies on chmod etc. being suid root (!!)
689 import stat
690 suid_check_bins = "chown chmod mknod".split()
691 for bin_cmd in suid_check_bins:
692 bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
693 if bin_path:
694 bin_stat = os.stat(bin_path)
695 if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
696 status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
697
698 # Check that we can fetch from various network transports
699 netcheck = check_connectivity(d)
700 status.addresult(netcheck)
701 if netcheck:
702 status.network_error = True
703
704 nolibs = d.getVar('NO32LIBS')
705 if not nolibs:
706 lib32path = '/lib'
707 if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
708 lib32path = '/lib32'
709
710 if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
711 status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
712
713 bbpaths = d.getVar('BBPATH').split(":")
714 if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
715 status.addresult("BBPATH references the current directory, either through " \
716 "an empty entry, a './' or a '.'.\n\t This is unsafe and means your "\
717 "layer configuration is adding empty elements to BBPATH.\n\t "\
718 "Please check your layer.conf files and other BBPATH " \
719 "settings to remove the current working directory " \
720 "references.\n" \
721 "Parsed BBPATH is" + str(bbpaths));
722
723 oes_bb_conf = d.getVar('OES_BITBAKE_CONF')
724 if not oes_bb_conf:
725 status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
726
727 # The length of TMPDIR can't be longer than 410 characters
728 status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
729
730 # Check that TMPDIR isn't located on nfs
731 status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
732
733 # Check for case-insensitive file systems (such as Linux in Docker on
734 # macOS with default HFS+ file system)
735 status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
736
737def sanity_check_locale(d):
738 """
739 Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
740 """
741 import locale
742 try:
743 locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
744 except locale.Error:
745 raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
746
747def check_sanity_everybuild(status, d):
748 import os, stat
749 # Sanity tests which test the user's environment and so need to run at each build (or are
750 # so cheap it makes sense to always run them).
751
752 if os.getuid() == 0:
753 raise_sanity_error("Do not use Bitbake as root.", d)
754
755 # Check the Python version, we now have a minimum of Python 3.6
756 import sys
757 if sys.hexversion < 0x030600F0:
758 status.addresult('The system requires at least Python 3.6 to run. Please update your Python interpreter.\n')
759
760 # Check the bitbake version meets minimum requirements
761 minversion = d.getVar('BB_MIN_VERSION')
762 if bb.utils.vercmp_string_op(bb.__version__, minversion, "<"):
763 status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
764
765 sanity_check_locale(d)
766
767 paths = d.getVar('PATH').split(":")
768 if "." in paths or "./" in paths or "" in paths:
769 status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
770
771 #Check if bitbake is present in PATH environment variable
772 bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
773 if not bb_check:
774 bb.warn("bitbake binary is not found in PATH, did you source the script?")
775
776 # Check whether the 'inherit' directive has been used (it is meant for classes
777 # and recipes only); in conf files the uppercase INHERIT should be used
778 inherit = d.getVar('inherit')
779 if inherit:
780 status.addresult("Please don't use inherit directive in your local.conf. The directive is supposed to be used in classes and recipes only to inherit of bbclasses. Here INHERIT should be used.\n")
781
782 # Check that the DISTRO is valid, if set
783 # need to take into account DISTRO renaming DISTRO
784 distro = d.getVar('DISTRO')
785 if distro and distro != "nodistro":
786 if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
787 status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
788
789 # Check that these variables don't use tilde-expansion as we don't do that
790 for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
791 if d.getVar(v).startswith("~"):
792 status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
793
794 # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
795 # set, since so much relies on it being set.
796 dldir = d.getVar('DL_DIR')
797 if not dldir:
798 status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
799 if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
800 status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
801 check_symlink(dldir, d)
802
803 # Check that the MACHINE is valid, if it is set
804 machinevalid = True
805 if d.getVar('MACHINE'):
806 if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
807 status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
808 machinevalid = False
809 else:
810 status.addresult(check_sanity_validmachine(d))
811 else:
812 status.addresult('Please set a MACHINE in your local.conf or environment\n')
813 machinevalid = False
814 if machinevalid:
815 status.addresult(check_toolchain(d))
816
817 # Check that the SDKMACHINE is valid, if it is set
818 if d.getVar('SDKMACHINE'):
819 if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
820 status.addresult('Specified SDKMACHINE value is not valid\n')
821 elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
822 status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
823
824 # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
825 sdkvendor = d.getVar("SDK_VENDOR")
826 if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
827 status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
828
829 check_supported_distro(d)
830
831 omask = os.umask(0o022)
832 if omask & 0o755:
833 status.addresult("Please use a umask which allows a+rx and u+rwx\n")
834 os.umask(omask)
835
836 if d.getVar('TARGET_ARCH') == "arm":
837 # This path is no longer user-readable in modern (very recent) Linux
838 try:
839 if os.path.exists("/proc/sys/vm/mmap_min_addr"):
840 f = open("/proc/sys/vm/mmap_min_addr", "r")
841 try:
842 if (int(f.read().strip()) > 65536):
843 status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
844 finally:
845 f.close()
846 except:
847 pass
848
849 for checkdir in ['COREBASE', 'TMPDIR']:
850 val = d.getVar(checkdir)
851 if val.find('..') != -1:
852 status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir)
853 if val.find('+') != -1:
854 status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir)
855 if val.find('@') != -1:
856 status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir)
857 if val.find(' ') != -1:
858 status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir)
859 if val.find('%') != -1:
860 status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir)
861
862 # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
863 import re
864 mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
865 protocols = ['http', 'ftp', 'file', 'https', \
866 'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
867 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az', 'ftps']
868 for mirror_var in mirror_vars:
869 mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
870
871 # Split into pairs
872 if len(mirrors) % 2 != 0:
873 bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
874 continue
875 mirrors = list(zip(*[iter(mirrors)]*2))
876
877 for mirror_entry in mirrors:
878 pattern, mirror = mirror_entry
879
880 decoded = bb.fetch2.decodeurl(pattern)
881 try:
882 pattern_scheme = re.compile(decoded[0])
883 except re.error as exc:
884 bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
885 continue
886
887 if not any(pattern_scheme.match(protocol) for protocol in protocols):
888 bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
889 continue
890
891 if not any(mirror.startswith(protocol + '://') for protocol in protocols):
892 bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
893 continue
894
895 if mirror.startswith('file://'):
896 import urllib
897 check_symlink(urllib.parse.urlparse(mirror).path, d)
898 # SSTATE_MIRROR ends with a /PATH string
899 if mirror.endswith('/PATH'):
900 # remove /PATH$ from SSTATE_MIRROR to get a working
901 # base directory path
902 mirror_base = urllib.parse.urlparse(mirror[:-1*len('/PATH')]).path
903 check_symlink(mirror_base, d)
904
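# For illustration, a well-formed mirror value is a scheme-regex/URL pair,
# e.g. (hypothetical host):
#   SSTATE_MIRRORS ?= "file://.* https://sstate.example.com/PATH;downloadfilename=PATH"
# which the zip() above turns into ('file://.*', 'https://...PATH...').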
905 # Check sstate mirrors aren't being used with a local hash server and no remote
906 hashserv = d.getVar("BB_HASHSERVE")
907 if d.getVar("SSTATE_MIRRORS") and hashserv and hashserv.startswith("unix://") and not d.getVar("BB_HASHSERVE_UPSTREAM"):
908 bb.warn("You are using a local hash equivalence server but have configured an sstate mirror. This will likely mean no sstate will match from the mirror. You may wish to disable the hash equivalence use (BB_HASHSERVE), or use a hash equivalence server alongside the sstate mirror.")
909
910 # Check that TMPDIR hasn't changed location since the last time we were run
911 tmpdir = d.getVar('TMPDIR')
912 checkfile = os.path.join(tmpdir, "saved_tmpdir")
913 if os.path.exists(checkfile):
914 with open(checkfile, "r") as f:
915 saved_tmpdir = f.read().strip()
916 if (saved_tmpdir != tmpdir):
917 status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
918 else:
919 bb.utils.mkdirhier(tmpdir)
920 # Remove setuid, setgid and sticky bits from TMPDIR
921 try:
922 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
923 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
924 os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
925 except OSError as exc:
926 bb.warn("Unable to chmod TMPDIR: %s" % exc)
927 with open(checkfile, "w") as f:
928 f.write(tmpdir)
929
930 # If /bin/sh is a symlink, check that it points to dash or bash
931 if os.path.islink('/bin/sh'):
932 real_sh = os.path.realpath('/bin/sh')
933 # Due to update-alternatives, the shell name may take various
934 # forms, such as /bin/dash, bin/bash, /bin/bash.bash ...
935 if '/dash' not in real_sh and '/bash' not in real_sh:
936 status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
937
938def check_sanity(sanity_data):
939 class SanityStatus(object):
940 def __init__(self):
941 self.messages = ""
942 self.network_error = False
943
944 def addresult(self, message):
945 if message:
946 self.messages = self.messages + message
947
948 status = SanityStatus()
949
950 tmpdir = sanity_data.getVar('TMPDIR')
951 sstate_dir = sanity_data.getVar('SSTATE_DIR')
952
953 check_symlink(sstate_dir, sanity_data)
954
955 # Check saved sanity info
956 last_sanity_version = 0
957 last_tmpdir = ""
958 last_sstate_dir = ""
959 last_nativelsbstr = ""
960 sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
961 if os.path.exists(sanityverfile):
962 with open(sanityverfile, 'r') as f:
963 for line in f:
964 if line.startswith('SANITY_VERSION'):
965 last_sanity_version = int(line.split()[1])
966 if line.startswith('TMPDIR'):
967 last_tmpdir = line.split()[1]
968 if line.startswith('SSTATE_DIR'):
969 last_sstate_dir = line.split()[1]
970 if line.startswith('NATIVELSBSTRING'):
971 last_nativelsbstr = line.split()[1]
972
973 check_sanity_everybuild(status, sanity_data)
974
975 sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
976 network_error = False
977 # NATIVELSBSTRING var may have been overridden with "universal", so
978 # get actual host distribution id and version
979 nativelsbstr = lsb_distro_identifier(sanity_data)
980 if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr:
981 check_sanity_version_change(status, sanity_data)
982 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
983 else:
984 if last_sstate_dir != sstate_dir:
985 status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
986
987 if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
988 with open(sanityverfile, 'w') as f:
989 f.write("SANITY_VERSION %s\n" % sanity_version)
990 f.write("TMPDIR %s\n" % tmpdir)
991 f.write("SSTATE_DIR %s\n" % sstate_dir)
992 f.write("NATIVELSBSTRING %s\n" % nativelsbstr)
993
994 sanity_handle_abichanges(status, sanity_data)
995
996 if status.messages != "":
997 raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
998
999# Create a copy of the datastore and finalise it to ensure appends and
1000# overrides are set - the datastore has yet to be finalised at ConfigParsed
1001def copy_data(e):
1002 sanity_data = bb.data.createCopy(e.data)
1003 sanity_data.finalize()
1004 return sanity_data
1005
1006addhandler config_reparse_eventhandler
1007config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
1008python config_reparse_eventhandler() {
1009 sanity_check_conffiles(e.data)
1010}
1011
1012addhandler check_sanity_eventhandler
1013check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
1014python check_sanity_eventhandler() {
1015 if bb.event.getName(e) == "SanityCheck":
1016 sanity_data = copy_data(e)
1017 check_sanity(sanity_data)
1018 if e.generateevents:
1019 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1020 bb.event.fire(bb.event.SanityCheckPassed(), e.data)
1021 elif bb.event.getName(e) == "NetworkTest":
1022 sanity_data = copy_data(e)
1023 if e.generateevents:
1024 sanity_data.setVar("SANITY_USE_EVENTS", "1")
1025 bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
1026
1027 return
1028}
diff --git a/meta/classes-global/sstate.bbclass b/meta/classes-global/sstate.bbclass
new file mode 100644
index 0000000000..cd77c58dbf
--- /dev/null
+++ b/meta/classes-global/sstate.bbclass
@@ -0,0 +1,1364 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "10"
8
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
14def generate_sstatefn(spec, hash, taskname, siginfo, d):
15 if taskname is None:
16 return ""
17 extension = ".tar.zst"
18 # 8 chars reserved for siginfo
19 limit = 254 - 8
20 if siginfo:
21 limit = 254
22 extension = ".tar.zst.siginfo"
23 if not hash:
24 hash = "INVALID"
25 fn = spec + hash + "_" + taskname + extension
26 # If the filename is too long, attempt to reduce it
27 if len(fn) > limit:
28 components = spec.split(":")
29 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
30 # 7 is for the separators
31 avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
32 components[2] = components[2][:avail]
33 components[3] = components[3][:avail]
34 components[4] = components[4][:avail]
35 spec = ":".join(components)
36 fn = spec + hash + "_" + taskname + extension
37 if len(fn) > limit:
38 bb.fatal("Unable to reduce sstate name to less than 255 chararacters")
39 return hash[:2] + "/" + hash[2:4] + "/" + fn
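# For illustration (hypothetical hash and recipe): with a 64-character hash H
# and taskname "populate_sysroot",
#   generate_sstatefn("sstate:glibc:core2-64-poky-linux:2.35:r0:core2-64:10:",
#                     H, "populate_sysroot", False, d)
# returns "H[0:2]/H[2:4]/sstate:glibc:...H_populate_sysroot.tar.zst"; fields
# 2-4 of the spec are truncated only when the name would exceed the limit
# (254 characters, of which 8 are reserved for the .siginfo variant).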
40
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
49
50# explicitly make PV depend on the evaluated value of the PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
54# of the system; we let the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
58# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
59SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
60# Avoid docbook/sgml catalog warnings for now
61SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
62# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
63SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
64SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
65# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
66SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
67# Archive the sources for many architectures in one deploy folder
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
69# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
72SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
73SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
74
75SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
76SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
77SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
78SSTATE_HASHEQUIV_FILEMAP ?= " \
79 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
80 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
81 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
82 populate_sysroot:*/crossscripts/*:${TMPDIR} \
83 populate_sysroot:*/crossscripts/*:${COREBASE} \
84 "
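# Each SSTATE_HASHEQUIV_FILEMAP entry above has the form <task>:<file glob>:<substitution>.
# For files matching the glob in the named sstate task, the substitution (an expanded
# variable value, or a "regex-" prefixed expression) is masked out before the output
# hash is computed, so host-specific paths do not defeat hash equivalence. A
# hypothetical extra entry might look like:
#   SSTATE_HASHEQUIV_FILEMAP += "populate_sysroot:*/mytool-wrapper:${TMPDIR}"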
85
86BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
87
88SSTATE_ARCHS = " \
89 ${BUILD_ARCH} \
90 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
91 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_OS} \
93 ${SDK_ARCH}_${PACKAGE_ARCH} \
94 allarch \
95 ${PACKAGE_ARCH} \
96 ${PACKAGE_EXTRA_ARCHS} \
97 ${MACHINE_ARCH}"
98SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
99
100SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
101
102SSTATECREATEFUNCS += "sstate_hardcode_path"
103SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
104SSTATEPOSTCREATEFUNCS = ""
105SSTATEPREINSTFUNCS = ""
106SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
107SSTATEPOSTINSTFUNCS = ""
108EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
109
110# Check whether sstate exists for tasks that support sstate and are in the
111# locked signatures file.
112SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
113
114# Check whether the task's computed hash matches the task's hash in the
115# locked signatures file.
116SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
117
118# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
119# not sign)
120SSTATE_SIG_KEY ?= ""
121SSTATE_SIG_PASSPHRASE ?= ""
122# Whether to verify the GnuPG signatures when extracting sstate archives
123SSTATE_VERIFY_SIG ?= "0"
124# List of signatures to consider valid.
125SSTATE_VALID_SIGS ??= ""
126SSTATE_VALID_SIGS[vardepvalue] = ""
127
128SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
129SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
130 the output hash for a task, which in turn is used to determine equivalency. \
131 "
132
133SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
134SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
135 hash equivalency server, such as PN, PV, taskname, etc. This information \
136 is very useful for developers looking at task data, but may leak sensitive \
137 data if the equivalence server is public. \
138 "
139
140python () {
141 if bb.data.inherits_class('native', d):
142 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
143 elif bb.data.inherits_class('crosssdk', d):
144 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
145 elif bb.data.inherits_class('cross', d):
146 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
147 elif bb.data.inherits_class('nativesdk', d):
148 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
149 elif bb.data.inherits_class('cross-canadian', d):
150 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
151 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
152 d.setVar('SSTATE_PKGARCH', "allarch")
153 else:
154 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
155
156 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
157 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
158 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
159 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
160
161 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
162 d.setVar('SSTATETASKS', " ".join(unique_tasks))
163 for task in unique_tasks:
164 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
165 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
166 d.setVarFlag(task, 'network', '1')
167 d.setVarFlag(task + "_setscene", 'network', '1')
168}
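# Net effect of the above, illustratively: native objects are filed under the
# BUILD_ARCH (e.g. x86_64) with a NATIVELSBSTRING extra path, cross objects
# similarly, and target objects under their PACKAGE_ARCH (e.g. core2-64), so
# native/cross sstate is only shared between hosts whose distribution string
# matches (or which use uninative).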
169
170def sstate_init(task, d):
171 ss = {}
172 ss['task'] = task
173 ss['dirs'] = []
174 ss['plaindirs'] = []
175 ss['lockfiles'] = []
176 ss['lockfiles-shared'] = []
177 return ss
178
179def sstate_state_fromvars(d, task = None):
180 if task is None:
181 task = d.getVar('BB_CURRENTTASK')
182 if not task:
183 bb.fatal("sstate code running without task context?!")
184 task = task.replace("_setscene", "")
185
186 if task.startswith("do_"):
187 task = task[3:]
188 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
189 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
190 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
191 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
192 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
193 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
194 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
195 if not task or len(inputs) != len(outputs):
196 bb.fatal("sstate variables not setup correctly?!")
197
198 if task == "populate_lic":
199 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
200 d.setVar("SSTATE_EXTRAPATH", "")
201 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
202
203 ss = sstate_init(task, d)
204 for i in range(len(inputs)):
205 sstate_add(ss, inputs[i], outputs[i], d)
206 ss['lockfiles'] = lockfiles
207 ss['lockfiles-shared'] = lockfilesshared
208 ss['plaindirs'] = plaindirs
209 ss['interceptfuncs'] = interceptfuncs
210 ss['fixmedir'] = fixmedir
211 return ss
212
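# A minimal sketch of the varflags this function consumes, using a hypothetical
# do_deploy_examples task (do_populate_sysroot in staging.bbclass follows the
# same pattern):
#
#   SSTATETASKS += "do_deploy_examples"
#   do_deploy_examples[sstate-inputdirs] = "${WORKDIR}/deploy-examples"
#   do_deploy_examples[sstate-outputdirs] = "${DEPLOY_DIR}/examples"
#   do_deploy_examples[sstate-lockfile-shared] = "${DEPLOY_DIR}/examples.lock"
#
# Each input directory is captured into the sstate object and restored to the
# corresponding output directory when the task is accelerated from sstate.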
213def sstate_add(ss, source, dest, d):
214 if not source.endswith("/"):
215 source = source + "/"
216 if not dest.endswith("/"):
217 dest = dest + "/"
218 source = os.path.normpath(source)
219 dest = os.path.normpath(dest)
220 srcbase = os.path.basename(source)
221 ss['dirs'].append([srcbase, source, dest])
222 return ss
223
224def sstate_install(ss, d):
225 import oe.path
226 import oe.sstatesig
227 import subprocess
228
229 sharedfiles = []
230 shareddirs = []
231 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
232
233 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
234
235 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
236
237 if os.access(manifest, os.R_OK):
238 bb.fatal("Package already staged (%s)?!" % manifest)
239
240 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
241
242 locks = []
243 for lock in ss['lockfiles-shared']:
244 locks.append(bb.utils.lockfile(lock, True))
245 for lock in ss['lockfiles']:
246 locks.append(bb.utils.lockfile(lock))
247
248 for state in ss['dirs']:
249 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
250 for walkroot, dirs, files in os.walk(state[1]):
251 for file in files:
252 srcpath = os.path.join(walkroot, file)
253 dstpath = srcpath.replace(state[1], state[2])
254 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
255 sharedfiles.append(dstpath)
256 for dir in dirs:
257 srcdir = os.path.join(walkroot, dir)
258 dstdir = srcdir.replace(state[1], state[2])
259 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
260 if os.path.islink(srcdir):
261 sharedfiles.append(dstdir)
262 continue
263 if not dstdir.endswith("/"):
264 dstdir = dstdir + "/"
265 shareddirs.append(dstdir)
266
267 # Check the file list for conflicts against files which already exist
268 overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
269 match = []
270 for f in sharedfiles:
271 if os.path.exists(f) and not os.path.islink(f):
272 f = os.path.normpath(f)
273 realmatch = True
274 for w in overlap_allowed:
275 w = os.path.normpath(w)
276 if f.startswith(w):
277 realmatch = False
278 break
279 if realmatch:
280 match.append(f)
281 sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
282 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
283 if search_output:
284 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
285 else:
286 match.append(" (not matched to any task)")
287 if match:
288 bb.error("The recipe %s is trying to install files into a shared " \
289 "area when those files already exist. Those files and their manifest " \
290 "location are:\n %s\nPlease verify which recipe should provide the " \
291 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
292 "break things - if not now, possibly in the future (we've seen builds fail " \
293 "several months later). If the system knew how to recover from this " \
294 "automatically it would, however there are several different scenarios " \
295 "which can result in this and we don't know which one this is. It may be " \
296 "you have switched providers of something like virtual/kernel (e.g. from " \
297 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
298 "clean task for both recipes and it will resolve this error. It may be " \
299 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
300 "those recipes should again resolve this error, however switching " \
301 "DISTRO_FEATURES on an existing build directory is not supported - you " \
302 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
303 "It could be the overlapping files detected are harmless in which case " \
304 "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
305 "also be your build is including two different conflicting versions of " \
306                "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
307 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
308 "sharing the error and filelist above." % \
309 (d.getVar('PN'), "\n ".join(match)))
310 bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
311
312 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
313 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
314 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
315
316 # Write out the manifest
317 f = open(manifest, "w")
318 for file in sharedfiles:
319 f.write(file + "\n")
320
321 # We want to ensure that directories appear at the end of the manifest
322 # so that when we test to see if they should be deleted any contents
323 # added by the task will have been removed first.
324 dirs = sorted(shareddirs, key=len)
325 # Must remove children first, which will have a longer path than the parent
326 for di in reversed(dirs):
327 f.write(di + "\n")
328 f.close()
329
330 # Append to the list of manifests for this PACKAGE_ARCH
331
332 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
333 l = bb.utils.lockfile(i + ".lock")
334 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
335 manifests = []
336 if os.path.exists(i):
337 with open(i, "r") as f:
338 manifests = f.readlines()
339 # We append new entries, we don't remove older entries which may have the same
340 # manifest name but different versions from stamp/workdir. See below.
341 if filedata not in manifests:
342 with open(i, "a+") as f:
343 f.write(filedata)
344 bb.utils.unlockfile(l)
345
346 # Run the actual file install
347 for state in ss['dirs']:
348 if os.path.exists(state[1]):
349 oe.path.copyhardlinktree(state[1], state[2])
350
351 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
352 # All hooks should run in the SSTATE_INSTDIR
353 bb.build.exec_func(postinst, d, (sstateinst,))
354
355 for lock in locks:
356 bb.utils.unlockfile(lock)
357
358sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
359sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
360
361def sstate_installpkg(ss, d):
362 from oe.gpg_sign import get_signer
363
364 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
365 d.setVar("SSTATE_CURRTASK", ss['task'])
366 sstatefetch = d.getVar('SSTATE_PKGNAME')
367 sstatepkg = d.getVar('SSTATE_PKG')
368
369 if not os.path.exists(sstatepkg):
370 pstaging_fetch(sstatefetch, d)
371
372 if not os.path.isfile(sstatepkg):
373 bb.note("Sstate package %s does not exist" % sstatepkg)
374 return False
375
376 sstate_clean(ss, d)
377
378 d.setVar('SSTATE_INSTDIR', sstateinst)
379
380 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
381 if not os.path.isfile(sstatepkg + '.sig'):
382 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
383 return False
384 signer = get_signer(d, 'local')
385 if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
386 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
387 return False
388
389    # Remove any existing sstateinst directory to ensure it's clean
390 if os.path.exists(sstateinst):
391 oe.path.remove(sstateinst)
392 bb.utils.mkdirhier(sstateinst)
393
394 sstateinst = d.getVar("SSTATE_INSTDIR")
395 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
396
397 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
398 # All hooks should run in the SSTATE_INSTDIR
399 bb.build.exec_func(f, d, (sstateinst,))
400
401 return sstate_installpkgdir(ss, d)
402
403def sstate_installpkgdir(ss, d):
404 import oe.path
405 import subprocess
406
407 sstateinst = d.getVar("SSTATE_INSTDIR")
408 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
409
410 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
411 # All hooks should run in the SSTATE_INSTDIR
412 bb.build.exec_func(f, d, (sstateinst,))
413
414 def prepdir(dir):
415 # remove dir if it exists, ensure any parent directories do exist
416 if os.path.exists(dir):
417 oe.path.remove(dir)
418 bb.utils.mkdirhier(dir)
419 oe.path.remove(dir)
420
421 for state in ss['dirs']:
422 prepdir(state[1])
423 bb.utils.rename(sstateinst + state[0], state[1])
424 sstate_install(ss, d)
425
426 for plain in ss['plaindirs']:
427 workdir = d.getVar('WORKDIR')
428 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
429 src = sstateinst + "/" + plain.replace(workdir, '')
430 if sharedworkdir in plain:
431 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
432 dest = plain
433 bb.utils.mkdirhier(src)
434 prepdir(dest)
435 bb.utils.rename(src, dest)
436
437 return True
438
439python sstate_hardcode_path_unpack () {
440 # Fixup hardcoded paths
441 #
442 # Note: The logic below must match the reverse logic in
443 # sstate_hardcode_path(d)
444 import subprocess
445
446 sstateinst = d.getVar('SSTATE_INSTDIR')
447 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
448 fixmefn = sstateinst + "fixmepath"
449 if os.path.isfile(fixmefn):
450 staging_target = d.getVar('RECIPE_SYSROOT')
451 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
452
453 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
454 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
455 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
456 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
457 else:
458 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
459
460 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
461 for fixmevar in extra_staging_fixmes.split():
462 fixme_path = d.getVar(fixmevar)
463 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
464
465 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
466 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
467
468 # Defer do_populate_sysroot relocation command
469 if sstatefixmedir:
470 bb.utils.mkdirhier(sstatefixmedir)
471 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
472 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
473 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
474 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
475 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
476 f.write(sstate_hardcode_cmd)
477 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
478 return
479
480 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
481 subprocess.check_call(sstate_hardcode_cmd, shell=True)
482
483 # Need to remove this or we'd copy it into the target directory and may
484 # conflict with another writer
485 os.remove(fixmefn)
486}
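# Worked example of the above (paths hypothetical): if fixmepath lists
# "usr/bin/foo-config", the generated pipeline is effectively
#   sed -e 's:^:${SSTATE_INSTDIR}:g' fixmepath | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:${RECIPE_SYSROOT}:g' ...
# i.e. each listed file is rewritten in place, replacing the FIXME placeholder
# tokens with real, build-local paths.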
487
488def sstate_clean_cachefile(ss, d):
489 import oe.path
490
491 if d.getVarFlag('do_%s' % ss['task'], 'task'):
492 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
493 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
494 bb.note("Removing %s" % sstatepkgfile)
495 oe.path.remove(sstatepkgfile)
496
497def sstate_clean_cachefiles(d):
498 for task in (d.getVar('SSTATETASKS') or "").split():
499 ld = d.createCopy()
500 ss = sstate_state_fromvars(ld, task)
501 sstate_clean_cachefile(ss, ld)
502
503def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
504 import oe.path
505
506 mfile = open(manifest)
507 entries = mfile.readlines()
508 mfile.close()
509
510 for entry in entries:
511 entry = entry.strip()
512 if prefix and not entry.startswith("/"):
513 entry = prefix + "/" + entry
514 bb.debug(2, "Removing manifest: %s" % entry)
515 # We can race against another package populating directories as we're removing them
516 # so we ignore errors here.
517 try:
518 if entry.endswith("/"):
519 if os.path.islink(entry[:-1]):
520 os.remove(entry[:-1])
521 elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
522 # Removing directories whilst builds are in progress exposes a race. Only
523 # do it in contexts where it is safe to do so.
524 os.rmdir(entry[:-1])
525 else:
526 os.remove(entry)
527 except OSError:
528 pass
529
530 postrm = manifest + ".postrm"
531 if os.path.exists(manifest + ".postrm"):
532 import subprocess
533 os.chmod(postrm, 0o755)
534 subprocess.check_call(postrm, shell=True)
535 oe.path.remove(postrm)
536
537 oe.path.remove(manifest)
538
539def sstate_clean(ss, d):
540 import oe.path
541 import glob
542
543 d2 = d.createCopy()
544 stamp_clean = d.getVar("STAMPCLEAN")
545 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
546 if extrainf:
547 d2.setVar("SSTATE_MANMACH", extrainf)
548 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
549 else:
550 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
551
552 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
553
554 if os.path.exists(manifest):
555 locks = []
556 for lock in ss['lockfiles-shared']:
557 locks.append(bb.utils.lockfile(lock))
558 for lock in ss['lockfiles']:
559 locks.append(bb.utils.lockfile(lock))
560
561 sstate_clean_manifest(manifest, d, canrace=True)
562
563 for lock in locks:
564 bb.utils.unlockfile(lock)
565
566 # Remove the current and previous stamps, but keep the sigdata.
567 #
568 # The glob() matches do_task* which may match multiple tasks, for
569 # example: do_package and do_package_write_ipk, so we need to
570 # exactly match *.do_task.* and *.do_task_setscene.*
571 rm_stamp = '.do_%s.' % ss['task']
572 rm_setscene = '.do_%s_setscene.' % ss['task']
573 # For BB_SIGNATURE_HANDLER = "noop"
574 rm_nohash = ".do_%s" % ss['task']
575 for stfile in glob.glob(wildcard_stfile):
576 # Keep the sigdata
577 if ".sigdata." in stfile or ".sigbasedata." in stfile:
578 continue
579 # Preserve taint files in the stamps directory
580 if stfile.endswith('.taint'):
581 continue
582 if rm_stamp in stfile or rm_setscene in stfile or \
583 stfile.endswith(rm_nohash):
584 oe.path.remove(stfile)
585
586sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
587
588CLEANFUNCS += "sstate_cleanall"
589
590python sstate_cleanall() {
591 bb.note("Removing shared state for package %s" % d.getVar('PN'))
592
593 manifest_dir = d.getVar('SSTATE_MANIFESTS')
594 if not os.path.exists(manifest_dir):
595 return
596
597 tasks = d.getVar('SSTATETASKS').split()
598 for name in tasks:
599 ld = d.createCopy()
600 shared_state = sstate_state_fromvars(ld, name)
601 sstate_clean(shared_state, ld)
602}
603
604python sstate_hardcode_path () {
605 import subprocess, platform
606
607 # Need to remove hardcoded paths and fix these when we install the
608 # staging packages.
609 #
610 # Note: the logic in this function needs to match the reverse logic
611 # in sstate_installpkg(ss, d)
612
613 staging_target = d.getVar('RECIPE_SYSROOT')
614 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
615 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
616
617 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
618 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
619 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
620 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
621 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
622 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
623 else:
624 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
625 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
626
627 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
628 for fixmevar in extra_staging_fixmes.split():
629 fixme_path = d.getVar(fixmevar)
630 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
631 sstate_grep_cmd += " -e '%s'" % (fixme_path)
632
633 fixmefn = sstate_builddir + "fixmepath"
634
635 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
636 sstate_filelist_cmd = "tee %s" % (fixmefn)
637
638 # fixmepath file needs relative paths, drop sstate_builddir prefix
639 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
640
641 xargs_no_empty_run_cmd = '--no-run-if-empty'
642 if platform.system() == 'Darwin':
643 xargs_no_empty_run_cmd = ''
644
645 # Limit the fixpaths and sed operations based on the initial grep search
646 # This has the side effect of making sure the vfs cache is hot
647 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
648
649 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
650 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
651
652    # If the fixmefn is empty, remove it.
653 if os.stat(fixmefn).st_size == 0:
654 os.remove(fixmefn)
655 else:
656 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
657 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
658}
659
660def sstate_package(ss, d):
661 import oe.path
662 import time
663
664 tmpdir = d.getVar('TMPDIR')
665
666 fixtime = False
667 if ss['task'] == "package":
668 fixtime = True
669
670 def fixtimestamp(root, path):
671 f = os.path.join(root, path)
672 if os.lstat(f).st_mtime > sde:
673 os.utime(f, (sde, sde), follow_symlinks=False)
674
675 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
676 sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
677 d.setVar("SSTATE_CURRTASK", ss['task'])
678 bb.utils.remove(sstatebuild, recurse=True)
679 bb.utils.mkdirhier(sstatebuild)
680 for state in ss['dirs']:
681 if not os.path.exists(state[1]):
682 continue
683 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
684        # Find absolute symlinks and error on them. We could attempt to relocate, but it's
685        # not clear what the symlink is relative to in this context. We could add that markup
686        # to sstate tasks, but there aren't many of these, so it's better to avoid them entirely.
687 for walkroot, dirs, files in os.walk(state[1]):
688 for file in files + dirs:
689 if fixtime:
690 fixtimestamp(walkroot, file)
691 srcpath = os.path.join(walkroot, file)
692 if not os.path.islink(srcpath):
693 continue
694 link = os.readlink(srcpath)
695 if not os.path.isabs(link):
696 continue
697 if not link.startswith(tmpdir):
698 continue
699 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
700 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
701 bb.utils.rename(state[1], sstatebuild + state[0])
702
703 workdir = d.getVar('WORKDIR')
704 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
705 for plain in ss['plaindirs']:
706 pdir = plain.replace(workdir, sstatebuild)
707 if sharedworkdir in plain:
708 pdir = plain.replace(sharedworkdir, sstatebuild)
709 bb.utils.mkdirhier(plain)
710 bb.utils.mkdirhier(pdir)
711 bb.utils.rename(plain, pdir)
712 if fixtime:
713 fixtimestamp(pdir, "")
714 for walkroot, dirs, files in os.walk(pdir):
715 for file in files + dirs:
716 fixtimestamp(walkroot, file)
717
718 d.setVar('SSTATE_BUILDDIR', sstatebuild)
719 d.setVar('SSTATE_INSTDIR', sstatebuild)
720
721 if d.getVar('SSTATE_SKIP_CREATION') == '1':
722 return
723
724 sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
725 if d.getVar('SSTATE_SIG_KEY'):
726 sstate_create_package.append('sstate_sign_package')
727
728 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
729 sstate_create_package + \
730 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
731 # All hooks should run in SSTATE_BUILDDIR.
732 bb.build.exec_func(f, d, (sstatebuild,))
733
734 # SSTATE_PKG may have been changed by sstate_report_unihash
735 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
736 if not os.path.exists(siginfo):
737 bb.siggen.dump_this_task(siginfo, d)
738 else:
739 try:
740 os.utime(siginfo, None)
741 except PermissionError:
742 pass
743 except OSError as e:
744 # Handle read-only file systems gracefully
745 import errno
746 if e.errno != errno.EROFS:
747 raise e
748
749 return
750
751sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
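# Additional packaging hooks can be attached via the function lists used in
# sstate_package(), e.g. this illustrative sketch:
#
#   SSTATEPOSTCREATEFUNCS += "my_sstate_note"
#   python my_sstate_note () {
#       bb.note("Created sstate archive %s" % d.getVar("SSTATE_PKG"))
#   }
#
# As noted above, such hooks run from within SSTATE_BUILDDIR.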
752
753def pstaging_fetch(sstatefetch, d):
754 import bb.fetch2
755
756 # Only try and fetch if the user has configured a mirror
757 mirrors = d.getVar('SSTATE_MIRRORS')
758 if not mirrors:
759 return
760
761 # Copy the data object and override DL_DIR and SRC_URI
762 localdata = bb.data.createCopy(d)
763
764 dldir = localdata.expand("${SSTATE_DIR}")
765 bb.utils.mkdirhier(dldir)
766
767 localdata.delVar('MIRRORS')
768 localdata.setVar('FILESPATH', dldir)
769 localdata.setVar('DL_DIR', dldir)
770 localdata.setVar('PREMIRRORS', mirrors)
771 localdata.setVar('SRCPV', d.getVar('SRCPV'))
772
773 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
774 # we'll want to allow network access for the current set of fetches.
775 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
776 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
777 localdata.delVar('BB_NO_NETWORK')
778
779    # Try a fetch from the sstate mirror; if it fails, just return and
780 # we will build the package
781 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
782 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
783 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
784 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
785
786 for srcuri in uris:
787 localdata.setVar('SRC_URI', srcuri)
788 try:
789 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
790 fetcher.checkstatus()
791 fetcher.download()
792
793 except bb.fetch2.BBFetchException:
794 pass
795
796pstaging_fetch[vardepsexclude] += "SRCPV"
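# An illustrative SSTATE_MIRRORS setting (e.g. in local.conf) that the fetch
# above would consult; the fetcher's mirror handling substitutes PATH with the
# object's relative path:
#
#   SSTATE_MIRRORS ?= "file://.* https://sstate.example.com/cache/PATH;downloadfilename=PATH"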
797
798
799def sstate_setscene(d):
800 shared_state = sstate_state_fromvars(d)
801 accelerate = sstate_installpkg(shared_state, d)
802 if not accelerate:
803 msg = "No sstate archive obtainable, will run full task instead."
804 bb.warn(msg)
805 raise bb.BBHandledException(msg)
806
807python sstate_task_prefunc () {
808 shared_state = sstate_state_fromvars(d)
809 sstate_clean(shared_state, d)
810}
811sstate_task_prefunc[dirs] = "${WORKDIR}"
812
813python sstate_task_postfunc () {
814 shared_state = sstate_state_fromvars(d)
815
816 for intercept in shared_state['interceptfuncs']:
817 bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
818
819 omask = os.umask(0o002)
820 if omask != 0o002:
821 bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
822 sstate_package(shared_state, d)
823 os.umask(omask)
824
825 sstateinst = d.getVar("SSTATE_INSTDIR")
826 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
827
828 sstate_installpkgdir(shared_state, d)
829
830 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
831}
832sstate_task_postfunc[dirs] = "${WORKDIR}"
833
834
835#
836# Shell function to generate an sstate package from a directory
837# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
838#
839sstate_create_package () {
840 # Exit early if it already exists
841 if [ -e ${SSTATE_PKG} ]; then
842 touch ${SSTATE_PKG} 2>/dev/null || true
843 return
844 fi
845
846	mkdir --mode=0775 -p $(dirname ${SSTATE_PKG})
847	TFILE=$(mktemp ${SSTATE_PKG}.XXXXXXXX)
848
849 OPT="-cS"
850 ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
851 # Use pzstd if available
852 if [ -x "$(command -v pzstd)" ]; then
853 ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
854 fi
855
856 # Need to handle empty directories
857 if [ "$(ls -A)" ]; then
858 set +e
859 tar -I "$ZSTD" $OPT -f $TFILE *
860 ret=$?
861 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
862 exit 1
863 fi
864 set -e
865 else
866 tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
867 fi
868 chmod 0664 $TFILE
869 # Skip if it was already created by some other process
870 if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
871 # There is a symbolic link, but it links to nothing.
872 # Forcefully replace it with the new file.
873 ln -f $TFILE ${SSTATE_PKG} || true
874 elif [ ! -e ${SSTATE_PKG} ]; then
875 # Move into place using ln to attempt an atomic op.
876 # Abort if it already exists
877 ln $TFILE ${SSTATE_PKG} || true
878 else
879 touch ${SSTATE_PKG} 2>/dev/null || true
880 fi
881 rm $TFILE
882}
883
884python sstate_sign_package () {
885 from oe.gpg_sign import get_signer
886
887
888 signer = get_signer(d, 'local')
889 sstate_pkg = d.getVar('SSTATE_PKG')
890 if os.path.exists(sstate_pkg + '.sig'):
891 os.unlink(sstate_pkg + '.sig')
892 signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
893 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
894}
895
896python sstate_report_unihash() {
897 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
898
899 if report_unihash:
900 ss = sstate_state_fromvars(d)
901 report_unihash(os.getcwd(), ss['task'], d)
902}
903
904#
905# Shell function to decompress and prepare a package for installation
906# Will be run from within SSTATE_INSTDIR.
907#
908sstate_unpack_package () {
909 ZSTD="zstd -T${ZSTD_THREADS}"
910 # Use pzstd if available
911 if [ -x "$(command -v pzstd)" ]; then
912 ZSTD="pzstd -p ${ZSTD_THREADS}"
913 fi
914
915 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
916 # update .siginfo atime on local/NFS mirror if it is a symbolic link
917 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
918 # update each symbolic link instead of any referenced file
919 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
920 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
921 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
922}
923
924BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
925
926def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
927 found = set()
928 missed = set()
929
930 def gethash(task):
931 return sq_data['unihash'][task]
932
933 def getpathcomponents(task, d):
934 # Magic data from BB_HASHFILENAME
935 splithashfn = sq_data['hashfn'][task].split(" ")
936 spec = splithashfn[1]
937 if splithashfn[0] == "True":
938 extrapath = d.getVar("NATIVELSBSTRING") + "/"
939 else:
940 extrapath = ""
941
942 tname = bb.runqueue.taskname_from_tid(task)[3:]
943
944 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
945 spec = splithashfn[2]
946 extrapath = ""
947
948 return spec, extrapath, tname
949
950 def getsstatefile(tid, siginfo, d):
951 spec, extrapath, tname = getpathcomponents(tid, d)
952 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
953
954 for tid in sq_data['hash']:
955
956 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
957
958 if os.path.exists(sstatefile):
959 found.add(tid)
960 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
961 else:
962 missed.add(tid)
963 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
964
965 foundLocal = len(found)
966 mirrors = d.getVar("SSTATE_MIRRORS")
967 if mirrors:
968 # Copy the data object and override DL_DIR and SRC_URI
969 localdata = bb.data.createCopy(d)
970
971 dldir = localdata.expand("${SSTATE_DIR}")
972 localdata.delVar('MIRRORS')
973 localdata.setVar('FILESPATH', dldir)
974 localdata.setVar('DL_DIR', dldir)
975 localdata.setVar('PREMIRRORS', mirrors)
976
977 bb.debug(2, "SState using premirror of: %s" % mirrors)
978
979 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
980 # we'll want to allow network access for the current set of fetches.
981 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
982 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
983 localdata.delVar('BB_NO_NETWORK')
984
985 from bb.fetch2 import FetchConnectionCache
986 def checkstatus_init():
987 while not connection_cache_pool.full():
988 connection_cache_pool.put(FetchConnectionCache())
989
990 def checkstatus_end():
991 while not connection_cache_pool.empty():
992 connection_cache = connection_cache_pool.get()
993 connection_cache.close_connections()
994
995 def checkstatus(arg):
996 (tid, sstatefile) = arg
997
998 connection_cache = connection_cache_pool.get()
999 localdata2 = bb.data.createCopy(localdata)
1000 srcuri = "file://" + sstatefile
1001 localdata2.setVar('SRC_URI', srcuri)
1002 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
1003
1004 import traceback
1005
1006 try:
1007 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
1008 connection_cache=connection_cache)
1009 fetcher.checkstatus()
1010 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1011 found.add(tid)
1012 missed.remove(tid)
1013 except bb.fetch2.FetchError as e:
1014 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1015 except Exception as e:
1016 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1017
1018 connection_cache_pool.put(connection_cache)
1019
1020 if progress:
1021 bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
1022
1023 tasklist = []
1024 for tid in missed:
1025 sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1026 tasklist.append((tid, sstatefile))
1027
1028 if tasklist:
1029 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1030
1031 progress = len(tasklist) >= 100
1032 if progress:
1033 msg = "Checking sstate mirror object availability"
1034 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1035
1036 # Have to setup the fetcher environment here rather than in each thread as it would race
1037 fetcherenv = bb.fetch2.get_fetcher_environment(d)
1038 with bb.utils.environment(**fetcherenv):
1039 bb.event.enable_threadlock()
1040 import concurrent.futures
1041 from queue import Queue
1042 connection_cache_pool = Queue(nproc)
1043 checkstatus_init()
1044 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
1045 executor.map(checkstatus, tasklist.copy())
1046 checkstatus_end()
1047 bb.event.disable_threadlock()
1048
1049 if progress:
1050 bb.event.fire(bb.event.ProcessFinished(msg), d)
1051
1052 inheritlist = d.getVar("INHERIT")
1053 if "toaster" in inheritlist:
1054        evdata = {'missed': [], 'found': []}
1055        for tid in missed:
1056            sstatefile = d.expand(getsstatefile(tid, False, d))
1057            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile))
1058        for tid in found:
1059            sstatefile = d.expand(getsstatefile(tid, False, d))
1060            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile))
1061 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1062
1063 if summary:
1064 # Print some summary statistics about the current task completion and how much sstate
1065 # reuse there was. Avoid divide by zero errors.
1066 total = len(sq_data['hash'])
1067 complete = 0
1068 if currentcount:
1069 complete = (len(found) + currentcount) / (total + currentcount) * 100
1070 match = 0
1071 if total:
1072 match = len(found) / total * 100
1073 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1074 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1075
1076 if hasattr(bb.parse.siggen, "checkhashes"):
1077 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1078
1079 return found
1080setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
1081
1082BB_SETSCENE_DEPVALID = "setscene_depvalid"
1083
1084def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1085 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1086 # task is included in taskdependees too
1087 # Return - False - We need this dependency
1088 # - True - We can skip this dependency
1089 import re
1090
1091 def logit(msg, log):
1092 if log is not None:
1093 log.append(msg)
1094 else:
1095 bb.debug(2, msg)
1096
1097 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1098
1099 directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
1100
1101 def isNativeCross(x):
1102 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1103
1104 # We only need to trigger deploy_source_date_epoch through direct dependencies
1105 if taskdependees[task][1] in directtasks:
1106 return True
1107
1108 # We only need to trigger packagedata through direct dependencies
1109 # but need to preserve packagedata on packagedata links
1110 if taskdependees[task][1] == "do_packagedata":
1111 for dep in taskdependees:
1112 if taskdependees[dep][1] == "do_packagedata":
1113 return False
1114 return True
1115
1116 for dep in taskdependees:
1117 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1118 if task == dep:
1119 continue
1120 if dep in notneeded:
1121 continue
1122 # do_package_write_* and do_package doesn't need do_package
1123 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1124 continue
1125 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1126 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1127 return False
1128 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1129 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1130 continue
1131 # Native/Cross packages don't exist and are noexec anyway
1132 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1133 continue
1134
1135 # This is due to the [depends] in useradd.bbclass complicating matters
1136 # The logic *is* reversed here due to the way hard setscene dependencies are injected
1137 if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1138 continue
1139
1140 # Consider sysroot depending on sysroot tasks
1141 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1142            # Allow excluding certain recursive dependencies. If a recipe needs one, it should
1143            # add a specific dependency itself, rather than relying on one of its dependees to
1144            # pull it in.
1145 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
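            # Entries take the form "<dependee regex>-><dependency regex>"; for
            # example (illustrative):
            #   SSTATE_EXCLUDEDEPS_SYSROOT += ".*->autoconf-archive-native"
            # would stop any recipe's sysroot from recursively pulling in
            # autoconf-archive-native's do_populate_sysroot.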
1146 not_needed = False
1147 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1148 if excludedeps is None:
1149 # Cache the regular expressions for speed
1150 excludedeps = []
1151 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1152 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1153 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1154 for excl in excludedeps:
1155 if excl[0].match(taskdependees[dep][0]):
1156 if excl[1].match(taskdependees[task][0]):
1157 not_needed = True
1158 break
1159 if not_needed:
1160 continue
1161 # For meta-extsdk-toolchain we want all sysroot dependencies
1162 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1163 return False
1164 # Native/Cross populate_sysroot need their dependencies
1165 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1166 return False
1167 # Target populate_sysroot depended on by cross tools need to be installed
1168 if isNativeCross(taskdependees[dep][0]):
1169 return False
1170 # Native/cross tools depended upon by target sysroot are not needed
1171 # Add an exception for shadow-native as required by useradd.bbclass
1172 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1173 continue
1174 # Target populate_sysroot need their dependencies
1175 return False
1176
1177 if taskdependees[dep][1] in directtasks:
1178 continue
1179
1180 # Safe fallthrough default
1181 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1182 return False
1183 return True
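# Worked example (hypothetical): if a target recipe's do_populate_sysroot is
# depended on only by another target recipe's do_package, the loop above takes
# the "don't need do_populate_sysroot" branch for that dependee and falls
# through to return True, so the sysroot data need not be staged from sstate.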
1184
1185addhandler sstate_eventhandler
1186sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1187python sstate_eventhandler() {
1188 d = e.data
1189 writtensstate = d.getVar('SSTATE_CURRTASK')
1190 if not writtensstate:
1191 taskname = d.getVar("BB_RUNTASK")[3:]
1192 spec = d.getVar('SSTATE_PKGSPEC')
1193 swspec = d.getVar('SSTATE_SWSPEC')
1194 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1195 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1196 d.setVar("SSTATE_EXTRAPATH", "")
1197 d.setVar("SSTATE_CURRTASK", taskname)
1198 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1199 if not os.path.exists(siginfo):
1200 bb.siggen.dump_this_task(siginfo, d)
1201 else:
1202 try:
1203 os.utime(siginfo, None)
1204 except PermissionError:
1205 pass
1206 except OSError as e:
1207 # Handle read-only file systems gracefully
1208 import errno
1209 if e.errno != errno.EROFS:
1210 raise e
1211
1212}
1213
1214SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1215
1216#
1217# Event handler which removes manifests and stamp files for recipes which are no
1218# longer 'reachable' in a build where they once were. 'Reachable' refers to
1219# whether a recipe is parsed; recipes in a layer which was removed would no
1220# longer be reachable. Switching between systemd and sysvinit, where recipes
1221# become skipped, would be another example.
1222#
1223# Also optionally removes the workdir of those tasks/recipes
1224#
1225addhandler sstate_eventhandler_reachablestamps
1226sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1227python sstate_eventhandler_reachablestamps() {
1228 import glob
1229 d = e.data
1230 stamps = e.stamps.values()
1231 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1232 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1233 preservestamps = []
1234 if os.path.exists(preservestampfile):
1235 with open(preservestampfile, 'r') as f:
1236 preservestamps = f.readlines()
1237 seen = []
1238
1239 # The machine index contains all the stamps this machine has ever seen in this build directory.
1240 # We should only remove things which this machine once accessed but no longer does.
1241 machineindex = set()
1242 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1243 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1244 if os.path.exists(mi):
1245 with open(mi, "r") as f:
1246 machineindex = set(line.strip() for line in f.readlines())
1247
1248 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1249 toremove = []
1250 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1251 if not os.path.exists(i):
1252 continue
1253 manseen = set()
1254 ignore = []
1255 with open(i, "r") as f:
1256 lines = f.readlines()
1257 for l in reversed(lines):
1258 try:
1259 (stamp, manifest, workdir) = l.split()
1260 # The index may have multiple entries for the same manifest as the code above only appends
1261 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1262 # The last entry in the list is the valid one, any earlier entries with matching manifests
1263 # should be ignored.
1264 if manifest in manseen:
1265 ignore.append(l)
1266 continue
1267 manseen.add(manifest)
1268 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1269 toremove.append(l)
1270 if stamp not in seen:
1271 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1272 seen.append(stamp)
1273 except ValueError:
1274 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1275
1276 if toremove:
1277 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1278 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1279
1280 removed = 0
1281 for r in toremove:
1282 (stamp, manifest, workdir) = r.split()
1283 for m in glob.glob(manifest + ".*"):
1284 if m.endswith(".postrm"):
1285 continue
1286 sstate_clean_manifest(m, d)
1287 bb.utils.remove(stamp + "*")
1288 if removeworkdir:
1289 bb.utils.remove(workdir, recurse = True)
1290 lines.remove(r)
1291 removed = removed + 1
1292 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1293
1294 bb.event.fire(bb.event.ProcessFinished(msg), d)
1295
1296 with open(i, "w") as f:
1297 for l in lines:
1298 if l in ignore:
1299 continue
1300 f.write(l)
1301 machineindex |= set(stamps)
1302 with open(mi, "w") as f:
1303 for l in machineindex:
1304 f.write(l + "\n")
1305
1306 if preservestamps:
1307 os.remove(preservestampfile)
1308}
1309
1310
1311#
1312# Bitbake can generate an event showing which setscene tasks are 'stale',
1313# i.e. which ones will be rerun. These are ones where a stamp file is present but
1314# it is stale (e.g. the taskhash doesn't match). With that list we can go through
1315# the manifests for matching tasks and "uninstall" those manifests now. We do
1316# this now rather than mid-build since the distribution of files between sstate
1317# objects may have changed, new tasks may run first and if those new tasks overlap
1318# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1319# removing these files is fast.
1320#
1321addhandler sstate_eventhandler_stalesstate
1322sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1323python sstate_eventhandler_stalesstate() {
1324 d = e.data
1325 tasks = e.tasks
1326
1327 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1328
1329 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1330 toremove = []
1331 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1332 if not os.path.exists(i):
1333 continue
1334 with open(i, "r") as f:
1335 lines = f.readlines()
1336 for l in lines:
1337 try:
1338 (stamp, manifest, workdir) = l.split()
1339 for tid in tasks:
1340 for s in tasks[tid]:
1341 if s.startswith(stamp):
1342 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1343 manname = manifest + "." + taskname
1344 if os.path.exists(manname):
1345 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1346 toremove.append((manname, tid, tasks[tid]))
1347 break
1348 except ValueError:
1349 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1350
1351 if toremove:
1352 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1353 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1354
1355 removed = 0
1356 for (manname, tid, stamps) in toremove:
1357 sstate_clean_manifest(manname, d)
1358 for stamp in stamps:
1359 bb.utils.remove(stamp)
1360 removed = removed + 1
1361 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1362
1363 bb.event.fire(bb.event.ProcessFinished(msg), d)
1364}
diff --git a/meta/classes-global/staging.bbclass b/meta/classes-global/staging.bbclass
new file mode 100644
index 0000000000..5a1f43de78
--- /dev/null
+++ b/meta/classes-global/staging.bbclass
@@ -0,0 +1,690 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7# These directories will be staged in the sysroot
8SYSROOT_DIRS = " \
9 ${includedir} \
10 ${libdir} \
11 ${base_libdir} \
12 ${nonarch_base_libdir} \
13 ${datadir} \
14 /sysroot-only \
15"
16
17# These directories are also staged in the sysroot when they contain files that
18# are usable on the build system
19SYSROOT_DIRS_NATIVE = " \
20 ${bindir} \
21 ${sbindir} \
22 ${base_bindir} \
23 ${base_sbindir} \
24 ${libexecdir} \
25 ${sysconfdir} \
26 ${localstatedir} \
27"
28SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
29SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
30SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
31
32# These directories will not be staged in the sysroot
33SYSROOT_DIRS_IGNORE = " \
34 ${mandir} \
35 ${docdir} \
36 ${infodir} \
37 ${datadir}/X11/locale \
38 ${datadir}/applications \
39 ${datadir}/bash-completion \
40 ${datadir}/fonts \
41 ${datadir}/gtk-doc/html \
42 ${datadir}/installed-tests \
43 ${datadir}/locale \
44 ${datadir}/pixmaps \
45 ${datadir}/terminfo \
46 ${libdir}/${BPN}/ptest \
47"
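# Recipes can adjust these lists as needed, e.g. this illustrative sketch:
#   SYSROOT_DIRS += "/opt/myfirmware"
#   SYSROOT_DIRS_IGNORE += "${datadir}/examples"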
48
49sysroot_stage_dir() {
50 src="$1"
51 dest="$2"
52    # if the src doesn't exist, don't do anything
53 if [ ! -d "$src" ]; then
54 return
55 fi
56
57 mkdir -p "$dest"
58 rdest=$(realpath --relative-to="$src" "$dest")
59 (
60 cd $src
61 find . -print0 | cpio --null -pdlu $rdest
62 )
63}
64
65sysroot_stage_dirs() {
66 from="$1"
67 to="$2"
68
69 for dir in ${SYSROOT_DIRS}; do
70 sysroot_stage_dir "$from$dir" "$to$dir"
71 done
72
73 # Remove directories we do not care about
74 for dir in ${SYSROOT_DIRS_IGNORE}; do
75 rm -rf "$to$dir"
76 done
77}
78
79sysroot_stage_all() {
80 sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
81}
82
83python sysroot_strip () {
84 inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
85 if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
86 return
87
88 dstdir = d.getVar('SYSROOT_DESTDIR')
89 pn = d.getVar('PN')
90 libdir = d.getVar("libdir")
91 base_libdir = d.getVar("base_libdir")
92 qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
93 strip_cmd = d.getVar("STRIP")
94
95 oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
96 qa_already_stripped=qa_already_stripped)
97}
98
99do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
100
101addtask populate_sysroot after do_install
102
103SYSROOT_PREPROCESS_FUNCS ?= ""
104SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
105
106python do_populate_sysroot () {
107 # SYSROOT 'version' 2
108 bb.build.exec_func("sysroot_stage_all", d)
109 bb.build.exec_func("sysroot_strip", d)
110 for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
111 bb.build.exec_func(f, d)
112 pn = d.getVar("PN")
113 multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
114 provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
115 bb.utils.mkdirhier(provdir)
116 for p in d.getVar("PROVIDES").split():
117 if p in multiprov:
118 continue
119 p = p.replace("/", "_")
120 with open(provdir + p, "w") as f:
121 f.write(pn)
122}
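# Recipes can hook into sysroot staging via SYSROOT_PREPROCESS_FUNCS, e.g.
# this illustrative sketch:
#
#   SYSROOT_PREPROCESS_FUNCS += "my_sysroot_cleanup"
#   my_sysroot_cleanup () {
#       rm -rf ${SYSROOT_DESTDIR}${bindir}/unwanted-tool
#   }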
123
124do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
125do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"
126
127POPULATESYSROOTDEPS = ""
128POPULATESYSROOTDEPS:class-target = "virtual/${MLPREFIX}${HOST_PREFIX}binutils:do_populate_sysroot"
129POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils-crosssdk:do_populate_sysroot"
130do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
131
132SSTATETASKS += "do_populate_sysroot"
133do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
134do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
135do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
136do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
137
138python do_populate_sysroot_setscene () {
139 sstate_setscene(d)
140}
141addtask do_populate_sysroot_setscene
142
143def staging_copyfile(c, target, dest, postinsts, seendirs):
144 import errno
145
146 destdir = os.path.dirname(dest)
147 if destdir not in seendirs:
148 bb.utils.mkdirhier(destdir)
149 seendirs.add(destdir)
150 if "/usr/bin/postinst-" in c:
151 postinsts.append(dest)
152 if os.path.islink(c):
153 linkto = os.readlink(c)
154 if os.path.lexists(dest):
155 if not os.path.islink(dest):
156 raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
157 if os.readlink(dest) == linkto:
158 return dest
159 raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
160 os.symlink(linkto, dest)
161 #bb.warn(c)
162 else:
163 try:
164 os.link(c, dest)
165 except OSError as err:
166 if err.errno == errno.EXDEV:
167 bb.utils.copyfile(c, dest)
168 else:
169 raise
170 return dest
171
172def staging_copydir(c, target, dest, seendirs):
173 if dest not in seendirs:
174 bb.utils.mkdirhier(dest)
175 seendirs.add(dest)
176
177def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
178 import subprocess
179
180 if not fixme:
181 return
182 cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
183 for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
184 fixme_path = d.getVar(fixmevar)
185 cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
186 bb.debug(2, cmd)
187 subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
188
189
190def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
191 import glob
192 import subprocess
193 import errno
194
195 fixme = []
196 postinsts = []
197 seendirs = set()
198 stagingdir = d.getVar("STAGING_DIR")
199 if native:
200 pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
201 targetdir = nativesysroot
202 else:
203 pkgarchs = ['${MACHINE_ARCH}']
204 pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
205 pkgarchs.append('allarch')
206 targetdir = targetsysroot
207
208 bb.utils.mkdirhier(targetdir)
209 for pkgarch in pkgarchs:
210 for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
211 if manifest.endswith("-initial.populate_sysroot"):
212 # skip libgcc-initial due to file overlap
213 continue
214 if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
215 continue
216 if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
217 continue
218 tmanifest = targetdir + "/" + os.path.basename(manifest)
219 if os.path.exists(tmanifest):
220 continue
221 try:
222 os.link(manifest, tmanifest)
223 except OSError as err:
224 if err.errno == errno.EXDEV:
225 bb.utils.copyfile(manifest, tmanifest)
226 else:
227 raise
228 with open(manifest, "r") as f:
229 for l in f:
230 l = l.strip()
231 if l.endswith("/fixmepath"):
232 fixme.append(l)
233 continue
234 if l.endswith("/fixmepath.cmd"):
235 continue
236 dest = l.replace(stagingdir, "")
237 dest = targetdir + "/" + "/".join(dest.split("/")[3:])
238 if l.endswith("/"):
239 staging_copydir(l, targetdir, dest, seendirs)
240 continue
241 try:
242 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
243 except FileExistsError:
244 continue
245
246 staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
247 for p in postinsts:
248 subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
249
250#
251# Manifests here are complicated. The main sysroot area has the unpacked sstate
252# which is unrelocated and tracked by the main sstate manifests. Each recipe
253# specific sysroot has manifests for each dependency that is installed there.
254# The task hash is used to tell whether the data needs to be reinstalled. We
255# use a symlink to point to the currently installed hash. There is also a
256# "complete" stamp file which is used to mark if installation completed. If
257# something fails (e.g. a postinst), this won't get written and we would
258# remove and reinstall the dependency. This also means partially installed
259# dependencies should get cleaned up correctly.
260#
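# For illustration (hypothetical recipe name and hash), the installeddeps area
# in the recipe-specific native sysroot ends up holding entries such as:
#   zlib -> zlib.<taskhash>   symlink to the currently installed hash/manifest
#   zlib.<taskhash>           manifest listing the files that were installed
#   zlib.complete             symlink written only once installation succeeded
#   index.do_configure        the dependencies this task installed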
261
262python extend_recipe_sysroot() {
263 import copy
264 import subprocess
265 import errno
266 import collections
267 import glob
268
269 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
270 mytaskname = d.getVar("BB_RUNTASK")
271 if mytaskname.endswith("_setscene"):
272 mytaskname = mytaskname.replace("_setscene", "")
273 workdir = d.getVar("WORKDIR")
274 #bb.warn(str(taskdepdata))
275 pn = d.getVar("PN")
276 stagingdir = d.getVar("STAGING_DIR")
277 sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
278 recipesysroot = d.getVar("RECIPE_SYSROOT")
279 recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
280
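# Rough shape of each BB_TASKDEPDATA entry as indexed below (illustrative,
# not a full description of the structure): taskid -> [0] PN, [1] task name,
# [2] recipe file, [3] dependency task ids, [5] task hash, [6] unihash.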
281 # Detect bitbake -b usage
282 nodeps = d.getVar("BB_LIMITEDDEPS") or False
283 if nodeps:
284 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
285 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
286 staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
287 bb.utils.unlockfile(lock)
288 return
289
290 start = None
291 configuredeps = []
292 owntaskdeps = []
293 for dep in taskdepdata:
294 data = taskdepdata[dep]
295 if data[1] == mytaskname and data[0] == pn:
296 start = dep
297 elif data[0] == pn:
298 owntaskdeps.append(data[1])
299 if start is None:
300 bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
301
302 # We need to figure out which sysroot files we need to expose to this task.
303 # This needs to match what would get restored from sstate, which is controlled
304 # ultimately by calls from bitbake to setscene_depvalid().
305 # That function expects a setscene dependency tree. We build a dependency tree
306 # condensed to inter-sstate task dependencies, similar to that used by setscene
307 # tasks. We can then call into setscene_depvalid() and decide
308 # which dependencies we can "see" and should expose in the recipe specific sysroot.
309 setscenedeps = copy.deepcopy(taskdepdata)
310
311 start = set([start])
312
313 sstatetasks = d.getVar("SSTATETASKS").split()
314 # Add recipe specific tasks referenced by setscene_depvalid()
315 sstatetasks.append("do_stash_locale")
316 sstatetasks.append("do_deploy")
317
318 def print_dep_tree(deptree):
319 data = ""
320 for dep in deptree:
321 deps = " " + "\n ".join(deptree[dep][3]) + "\n"
322 data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
323 return data
324
325 #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
326
327 #bb.note(" start2 is %s" % str(start))
328
329 # If start is an sstate task (like do_package) we need to add in its direct dependencies
330 # else the code below won't recurse into them.
331 for dep in set(start):
332 for dep2 in setscenedeps[dep][3]:
333 start.add(dep2)
334 start.remove(dep)
335
336 #bb.note(" start3 is %s" % str(start))
337
338 # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
339 for dep in taskdepdata:
340 data = setscenedeps[dep]
341 if data[1] not in sstatetasks:
342 for dep2 in setscenedeps:
343 data2 = setscenedeps[dep2]
344 if dep in data2[3]:
345 data2[3].update(setscenedeps[dep][3])
346 data2[3].remove(dep)
347 if dep in start:
348 start.update(setscenedeps[dep][3])
349 start.remove(dep)
350 del setscenedeps[dep]
351
352 # Remove circular references
353 for dep in setscenedeps:
354 if dep in setscenedeps[dep][3]:
355 setscenedeps[dep][3].remove(dep)
356
357 #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
358 #bb.note(" start is %s" % str(start))
359
360 # Direct dependencies should be present and can be depended upon
361 for dep in sorted(set(start)):
362 if setscenedeps[dep][1] == "do_populate_sysroot":
363 if dep not in configuredeps:
364 configuredeps.append(dep)
365 bb.note("Direct dependencies are %s" % str(configuredeps))
366 #bb.note(" or %s" % str(start))
367
368 msgbuf = []
369 # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
370 # for ones that would be restored from sstate.
371 done = list(start)
372 next = list(start)
373 while next:
374 new = []
375 for dep in next:
376 data = setscenedeps[dep]
377 for datadep in data[3]:
378 if datadep in done:
379 continue
380 taskdeps = {}
381 taskdeps[dep] = setscenedeps[dep][:2]
382 taskdeps[datadep] = setscenedeps[datadep][:2]
383 retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
384 if retval:
385 msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
386 continue
387 done.append(datadep)
388 new.append(datadep)
389 if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
390 configuredeps.append(datadep)
391 msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
392 else:
393 msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
394 next = new
395
396 # This logging is too verbose for day-to-day use, sadly
397 #bb.debug(2, "\n".join(msgbuf))
398
399 depdir = recipesysrootnative + "/installeddeps"
400 bb.utils.mkdirhier(depdir)
401 bb.utils.mkdirhier(sharedmanifests)
402
403 lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
404
405 fixme = {}
406 seendirs = set()
407 postinsts = []
408 multilibs = {}
409 manifests = {}
410 # All files that we're going to be installing, to find conflicts.
411 fileset = {}
412
413 invalidate_tasks = set()
414 for f in os.listdir(depdir):
415 removed = []
416 if not f.endswith(".complete"):
417 continue
418 f = depdir + "/" + f
419 if os.path.islink(f) and not os.path.exists(f):
420 bb.note("%s no longer exists, removing from sysroot" % f)
421 lnk = os.readlink(f.replace(".complete", ""))
422 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
423 os.unlink(f)
424 os.unlink(f.replace(".complete", ""))
425 removed.append(os.path.basename(f.replace(".complete", "")))
426
427 # If we've removed files from the sysroot above, the task that installed them may still
428 # have a stamp file present for the task. This is probably invalid right now but may become
429 # valid again if the user were to change the configuration back, for example. Since we've removed
430 # the files a task might need, remove the stamp file too to force it to rerun.
431 # YOCTO #14790
432 if removed:
433 for i in glob.glob(depdir + "/index.*"):
434 if i.endswith("." + mytaskname):
435 continue
436 with open(i, "r") as f:
437 for l in f:
438 if l.startswith("TaskDeps:"):
439 continue
440 l = l.strip()
441 if l in removed:
442 invalidate_tasks.add(i.rsplit(".", 1)[1])
443 break
444 for t in invalidate_tasks:
445 bb.note("Invalidating stamps for task %s" % t)
446 bb.build.clean_stamp(t, d)
447
448 installed = []
449 for dep in configuredeps:
450 c = setscenedeps[dep][0]
451 if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
452 bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
453 continue
454 installed.append(c)
455
456 # We want to remove anything which this task previously installed but is no longer a dependency
457 taskindex = depdir + "/" + "index." + mytaskname
458 if os.path.exists(taskindex):
459 potential = []
460 with open(taskindex, "r") as f:
461 for l in f:
462 l = l.strip()
463 if l not in installed:
464 fl = depdir + "/" + l
465 if not os.path.exists(fl):
466 # Was likely already uninstalled
467 continue
468 potential.append(l)
469 # We need to ensure no other task needs this dependency. We hold the sysroot
470 # lock so we can search the indexes to check
471 if potential:
472 for i in glob.glob(depdir + "/index.*"):
473 if i.endswith("." + mytaskname):
474 continue
475 with open(i, "r") as f:
476 for l in f:
477 if l.startswith("TaskDeps:"):
478 prevtasks = l.split()[1:]
479 if mytaskname in prevtasks:
480 # We're a dependency of this task so we can clear items out of the sysroot
481 break
482 l = l.strip()
483 if l in potential:
484 potential.remove(l)
485 for l in potential:
486 fl = depdir + "/" + l
487 bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
488 lnk = os.readlink(fl)
489 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
490 os.unlink(fl)
491 os.unlink(fl + ".complete")
492
493 msg_exists = []
494 msg_adding = []
495
496 # Handle all removals first since files may move between recipes
497 for dep in configuredeps:
498 c = setscenedeps[dep][0]
499 if c not in installed:
500 continue
501 taskhash = setscenedeps[dep][5]
502 taskmanifest = depdir + "/" + c + "." + taskhash
503
504 if os.path.exists(depdir + "/" + c):
505 lnk = os.readlink(depdir + "/" + c)
506 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
507 continue
508 else:
509 bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
510 sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
511 os.unlink(depdir + "/" + c)
512 if os.path.lexists(depdir + "/" + c + ".complete"):
513 os.unlink(depdir + "/" + c + ".complete")
514 elif os.path.lexists(depdir + "/" + c):
515 os.unlink(depdir + "/" + c)
516
517 binfiles = {}
518 # Now handle installs
519 for dep in configuredeps:
520 c = setscenedeps[dep][0]
521 if c not in installed:
522 continue
523 taskhash = setscenedeps[dep][5]
524 taskmanifest = depdir + "/" + c + "." + taskhash
525
526 if os.path.exists(depdir + "/" + c):
527 lnk = os.readlink(depdir + "/" + c)
528 if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
529 msg_exists.append(c)
530 continue
531
532 msg_adding.append(c)
533
534 os.symlink(c + "." + taskhash, depdir + "/" + c)
535
536 manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
537 if d2 is not d:
538 # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
539 # We need a consistent WORKDIR for the image
540 d2.setVar("WORKDIR", d.getVar("WORKDIR"))
541 destsysroot = d2.getVar("RECIPE_SYSROOT")
542 # We put allarch recipes into the default sysroot
543 if manifest and "allarch" in manifest:
544 destsysroot = d.getVar("RECIPE_SYSROOT")
545
546 native = False
547 if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
548 native = True
549
550 if manifest:
551 newmanifest = collections.OrderedDict()
552 targetdir = destsysroot
553 if native:
554 targetdir = recipesysrootnative
555 if targetdir not in fixme:
556 fixme[targetdir] = []
557 fm = fixme[targetdir]
558
559 with open(manifest, "r") as f:
560 manifests[dep] = manifest
561 for l in f:
562 l = l.strip()
563 if l.endswith("/fixmepath"):
564 fm.append(l)
565 continue
566 if l.endswith("/fixmepath.cmd"):
567 continue
568 dest = l.replace(stagingdir, "")
569 dest = "/" + "/".join(dest.split("/")[3:])
570 newmanifest[l] = targetdir + dest
571
572 # Check if files have already been installed by another
573 # recipe and abort if they have, explaining what recipes are
574 # conflicting.
575 hashname = targetdir + dest
576 if not hashname.endswith("/"):
577 if hashname in fileset:
578 bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
579 else:
580 fileset[hashname] = c
581
582 # Having multiple identical manifests in each sysroot eats disk space, so
583 # create a shared pool of them and hardlink if we can.
584 # We create the manifest in advance so that if something fails during installation,
585 # or the build is interrupted, subsequent execution can clean up.
586 sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
587 if not os.path.exists(sharedm):
588 smlock = bb.utils.lockfile(sharedm + ".lock")
589 # This can race. You'd think that would just mean we may not end up with all copies hardlinked to each other,
590 # but python can lose file handles, so we need to do this under a lock.
591 if not os.path.exists(sharedm):
592 with open(sharedm, 'w') as m:
593 for l in newmanifest:
594 dest = newmanifest[l]
595 m.write(dest.replace(workdir + "/", "") + "\n")
596 bb.utils.unlockfile(smlock)
597 try:
598 os.link(sharedm, taskmanifest)
599 except OSError as err:
600 if err.errno == errno.EXDEV:
601 bb.utils.copyfile(sharedm, taskmanifest)
602 else:
603 raise
604 # Finally actually install the files
605 for l in newmanifest:
606 dest = newmanifest[l]
607 if l.endswith("/"):
608 staging_copydir(l, targetdir, dest, seendirs)
609 continue
610 if "/bin/" in l or "/sbin/" in l:
611 # defer /*bin/* files until last in case they need libs
612 binfiles[l] = (targetdir, dest)
613 else:
614 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
615
616 # Handle deferred binfiles
617 for l in binfiles:
618 (targetdir, dest) = binfiles[l]
619 staging_copyfile(l, targetdir, dest, postinsts, seendirs)
620
621 bb.note("Installed into sysroot: %s" % str(msg_adding))
622 bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
623
624 for f in fixme:
625 staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)
626
627 for p in postinsts:
628 subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
629
630 for dep in manifests:
631 c = setscenedeps[dep][0]
632 os.symlink(manifests[dep], depdir + "/" + c + ".complete")
633
634 with open(taskindex, "w") as f:
635 f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
636 for l in sorted(installed):
637 f.write(l + "\n")
638
639 bb.utils.unlockfile(lock)
640}
641extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
642
643do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
644python do_prepare_recipe_sysroot () {
645 bb.build.exec_func("extend_recipe_sysroot", d)
646}
647addtask do_prepare_recipe_sysroot before do_configure after do_fetch
648
649python staging_taskhandler() {
650 bbtasks = e.tasklist
651 for task in bbtasks:
652 deps = d.getVarFlag(task, "depends")
653 if task == "do_configure" or (deps and "populate_sysroot" in deps):
654 d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
655}
656staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
657addhandler staging_taskhandler
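# Net effect (illustrative): a task declaring e.g.
#   do_compile[depends] += "quilt-native:do_populate_sysroot"
# automatically gets extend_recipe_sysroot prepended as a prefunc, so the
# dependency's files are staged into the recipe sysroot before the task runs.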
658
659
660#
661# Target build output, stored in do_populate_sysroot or do_package, can depend
662# not only upon direct dependencies but also indirect ones. A good example is
663# linux-libc-headers. The toolchain depends on this but most target recipes do
664# not. There are some headers which are not used by the toolchain build and do
665# not change the toolchain task output; hence the task hashes can change without
666# changing the sysroot output of that recipe, yet they can still influence others.
667#
668# A specific example is rtc.h which can change rtcwake.c in util-linux but is not
669# used in the glibc or gcc build. To handle this, we need to account for the
670# populate_sysroot hashes in the task output hashes.
671#
672python target_add_sysroot_deps () {
673 current_task = "do_" + d.getVar("BB_CURRENTTASK")
674 if current_task not in ["do_populate_sysroot", "do_package"]:
675 return
676
677 pn = d.getVar("PN")
678 if pn.endswith("-native"):
679 return
680
681 taskdepdata = d.getVar("BB_TASKDEPDATA", False)
682 deps = {}
683 for dep in taskdepdata.values():
684 if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0] and dep[0] != pn:
685 deps[dep[0]] = dep[6]
686
687 d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
688}
689SSTATECREATEFUNCS += "target_add_sysroot_deps"
690
diff --git a/meta/classes-global/uninative.bbclass b/meta/classes-global/uninative.bbclass
new file mode 100644
index 0000000000..4b7fb36449
--- /dev/null
+++ b/meta/classes-global/uninative.bbclass
@@ -0,0 +1,177 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}"
8UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
9
10UNINATIVE_URL ?= "unset"
11UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
12# Example checksums
13#UNINATIVE_CHECKSUM[aarch64] = "dead"
14#UNINATIVE_CHECKSUM[i686] = "dead"
15#UNINATIVE_CHECKSUM[x86_64] = "dead"
16UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
17
18# Enabling uninative will change the following variables, so they need to go on the parsing ignored variables list to prevent multiple rounds of recipe parsing
19BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
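# For reference (illustrative; mirrors how poky wires this up), a distro
# typically enables uninative with:
#   INHERIT += "uninative"
#   require conf/distro/include/yocto-uninative.inc
# where the include provides UNINATIVE_URL, UNINATIVE_MAXGLIBCVERSION and the
# per-architecture UNINATIVE_CHECKSUM values consumed below.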
20
21addhandler uninative_event_fetchloader
22uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
23
24addhandler uninative_event_enable
25uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
26
27python uninative_event_fetchloader() {
28 """
29 This event fires on the parent and will try to fetch the tarball if the
30 loader isn't already present.
31 """
32
33 chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
34 if not chksum:
35 bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
36
37 loader = d.getVar("UNINATIVE_LOADER")
38 loaderchksum = loader + ".chksum"
39 if os.path.exists(loader) and os.path.exists(loaderchksum):
40 with open(loaderchksum, "r") as f:
41 readchksum = f.read().strip()
42 if readchksum == chksum:
43 return
44
45 import subprocess
46 try:
47 # Save and restore cwd as Fetch.download() does a chdir()
48 olddir = os.getcwd()
49
50 tarball = d.getVar("UNINATIVE_TARBALL")
51 tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
52 tarballpath = os.path.join(tarballdir, tarball)
53
54 if not os.path.exists(tarballpath + ".done"):
55 bb.utils.mkdirhier(tarballdir)
56 if d.getVar("UNINATIVE_URL") == "unset":
57 bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
58
59 localdata = bb.data.createCopy(d)
60 localdata.setVar('FILESPATH', "")
61 localdata.setVar('DL_DIR', tarballdir)
62 # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
63 # and we can't easily put 'chksum' into the url path from a url parameter with
64 # the current fetcher url handling
65 premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
66 for line in premirrors:
67 try:
68 (find, replace) = line
69 except ValueError:
70 continue
71 if find.startswith("http"):
72 localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
73
74 srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
75 bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
76
77 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
78 fetcher.download()
79 localpath = fetcher.localpath(srcuri)
80 if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
81 # Follow the symlink behavior from the bitbake fetch2.
82 # This will cover the case where an existing symlink is broken
83 # as well as if there are two processes trying to create it
84 # at the same time.
85 if os.path.islink(tarballpath):
86 # Broken symbolic link
87 os.unlink(tarballpath)
88
89 # Deal with two processes trying to make symlink at once
90 try:
91 os.symlink(localpath, tarballpath)
92 except FileExistsError:
93 pass
94
95 # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract the last field of the first line
96 glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
97 if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
98 raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
99
100 cmd = d.expand("\
101mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
102cd ${UNINATIVE_STAGING_DIR}-uninative; \
103tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
104${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
105 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
106 ${UNINATIVE_LOADER} \
107 ${UNINATIVE_LOADER} \
108 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
109 ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
110 subprocess.check_output(cmd, shell=True)
111
112 with open(loaderchksum, "w") as f:
113 f.write(chksum)
114
115 enable_uninative(d)
116
117 except RuntimeError as e:
118 bb.warn(str(e))
119 except bb.fetch2.BBFetchException as exc:
120 bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
121 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
122 except subprocess.CalledProcessError as exc:
123 bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
124 bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
125 finally:
126 os.chdir(olddir)
127}
128
129python uninative_event_enable() {
130 """
131 This event handler is called in the workers and is responsible for setting
132 up uninative if a loader is found.
133 """
134 enable_uninative(d)
135}
136
137def enable_uninative(d):
138 loader = d.getVar("UNINATIVE_LOADER")
139 if os.path.exists(loader):
140 bb.debug(2, "Enabling uninative")
141 d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
142 d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
143 d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
144 d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
145 d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
146 d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
147 d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
148
149python uninative_changeinterp () {
150 import subprocess
151 import stat
152 import oe.qa
153
154 if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
155 return
156
157 sstateinst = d.getVar('SSTATE_INSTDIR')
158 for walkroot, dirs, files in os.walk(sstateinst):
159 for file in files:
160 if file.endswith(".so") or ".so." in file:
161 continue
162 f = os.path.join(walkroot, file)
163 if os.path.islink(f):
164 continue
165 s = os.stat(f)
166 if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
167 continue
168 elf = oe.qa.ELFFile(f)
169 try:
170 elf.open()
171 except oe.qa.NotELFFileError:
172 continue
173 if not elf.isDynamic():
174 continue
175
176 subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
177}
diff --git a/meta/classes-global/utility-tasks.bbclass b/meta/classes-global/utility-tasks.bbclass
new file mode 100644
index 0000000000..ae2da330b8
--- /dev/null
+++ b/meta/classes-global/utility-tasks.bbclass
@@ -0,0 +1,60 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7addtask listtasks
8do_listtasks[nostamp] = "1"
9python do_listtasks() {
10 taskdescs = {}
11 maxlen = 0
12 for e in d.keys():
13 if d.getVarFlag(e, 'task'):
14 maxlen = max(maxlen, len(e))
15 if e.endswith('_setscene'):
16 desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
17 else:
18 desc = d.getVarFlag(e, 'doc') or ''
19 taskdescs[e] = desc
20
21 tasks = sorted(taskdescs.keys())
22 for taskname in tasks:
23 bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
24}
25
26CLEANFUNCS ?= ""
27
28T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
29addtask clean
30do_clean[nostamp] = "1"
31python do_clean() {
32 """clear the build and temp directories"""
33 dir = d.expand("${WORKDIR}")
34 bb.note("Removing " + dir)
35 oe.path.remove(dir)
36
37 dir = "%s.*" % d.getVar('STAMP')
38 bb.note("Removing " + dir)
39 oe.path.remove(dir)
40
41 for f in (d.getVar('CLEANFUNCS') or '').split():
42 bb.build.exec_func(f, d)
43}
44
45addtask checkuri
46do_checkuri[nostamp] = "1"
47do_checkuri[network] = "1"
48python do_checkuri() {
49 src_uri = (d.getVar('SRC_URI') or "").split()
50 if len(src_uri) == 0:
51 return
52
53 try:
54 fetcher = bb.fetch2.Fetch(src_uri, d)
55 fetcher.checkstatus()
56 except bb.fetch2.BBFetchException as e:
57 bb.fatal(str(e))
58}
59
60
diff --git a/meta/classes-global/utils.bbclass b/meta/classes-global/utils.bbclass
new file mode 100644
index 0000000000..8d797ff126
--- /dev/null
+++ b/meta/classes-global/utils.bbclass
@@ -0,0 +1,369 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7oe_soinstall() {
8 # Purpose: Install shared library file and
9 # create the necessary links
10 # Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
11 libname=`basename $1`
12 case "$libname" in
13 *.so)
14 bbfatal "oe_soinstall: Shared library must have a versioned filename (e.g. libfoo.so.1.2.3)"
15 ;;
16 esac
17 install -m 755 $1 $2/$libname
18 sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
19 if [ -z $sonamelink ]; then
20 bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
21 fi
22 solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
23 ln -sf $libname $2/$sonamelink
24 ln -sf $libname $2/$solink
25}
26
27oe_libinstall() {
28 # Purpose: Install a library, in all its forms
29 # Example
30 #
31 # oe_libinstall libltdl ${STAGING_LIBDIR}/
32 # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
33 dir=""
34 libtool=""
35 silent=""
36 require_static=""
37 require_shared=""
38 while [ "$#" -gt 0 ]; do
39 case "$1" in
40 -C)
41 shift
42 dir="$1"
43 ;;
44 -s)
45 silent=1
46 ;;
47 -a)
48 require_static=1
49 ;;
50 -so)
51 require_shared=1
52 ;;
53 -*)
54 bbfatal "oe_libinstall: unknown option: $1"
55 ;;
56 *)
57 break;
58 ;;
59 esac
60 shift
61 done
62
63 libname="$1"
64 shift
65 destpath="$1"
66 if [ -z "$destpath" ]; then
67 bbfatal "oe_libinstall: no destination path specified"
68 fi
69
70 __runcmd () {
71 if [ -z "$silent" ]; then
72 echo >&2 "oe_libinstall: $*"
73 fi
74 $*
75 }
76
77 if [ -z "$dir" ]; then
78 dir=`pwd`
79 fi
80
81 dotlai=$libname.lai
82
83 # Sanity check that the libname.lai is unique
84 number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
85 if [ $number_of_files -gt 1 ]; then
86 bbfatal "oe_libinstall: $dotlai is not unique in $dir"
87 fi
88
89
90 dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
91 olddir=`pwd`
92 __runcmd cd $dir
93
94 lafile=$libname.la
95
96 # If no such file exists, try cutting the version suffix
97 if [ ! -f "$lafile" ]; then
98 libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
99 lafile1=$libname1.la
100 if [ -f "$lafile1" ]; then
101 libname=$libname1
102 lafile=$lafile1
103 fi
104 fi
105
106 if [ -f "$lafile" ]; then
107 # libtool archive
108 eval `cat $lafile|grep "^library_names="`
109 libtool=1
110 else
111 library_names="$libname.so* $libname.dll.a $libname.*.dylib"
112 fi
113
114 __runcmd install -d $destpath/
115 dota=$libname.a
116 if [ -f "$dota" -o -n "$require_static" ]; then
117 rm -f $destpath/$dota
118 __runcmd install -m 0644 $dota $destpath/
119 fi
120 if [ -f "$dotlai" -a -n "$libtool" ]; then
121 rm -f $destpath/$libname.la
122 __runcmd install -m 0644 $dotlai $destpath/$libname.la
123 fi
124
125 for name in $library_names; do
126 files=`eval echo $name`
127 for f in $files; do
128 if [ ! -e "$f" ]; then
129 if [ -n "$libtool" ]; then
130 bbfatal "oe_libinstall: $dir/$f not found."
131 fi
132 elif [ -L "$f" ]; then
133 __runcmd cp -P "$f" $destpath/
134 elif [ ! -L "$f" ]; then
135 libfile="$f"
136 rm -f $destpath/$libfile
137 __runcmd install -m 0755 $libfile $destpath/
138 fi
139 done
140 done
141
142 if [ -z "$libfile" ]; then
143 if [ -n "$require_shared" ]; then
144 bbfatal "oe_libinstall: unable to locate shared library"
145 fi
146 elif [ -z "$libtool" ]; then
147 # special case hack for non-libtool .so.#.#.# links
148 baselibfile=`basename "$libfile"`
149 if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
150 sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
151 solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
152 if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
153 __runcmd ln -sf $baselibfile $destpath/$sonamelink
154 fi
155 __runcmd ln -sf $baselibfile $destpath/$solink
156 fi
157 fi
158
159 __runcmd cd "$olddir"
160}
161
162create_cmdline_wrapper () {
163 # Create a wrapper script where commandline options are needed
164 #
165 # These are useful to work around relocation issues, by passing extra options
166 # to a program
167 #
168 # Usage: create_cmdline_wrapper FILENAME <extra-options>
169
170 cmd=$1
171 shift
172
173 echo "Generating wrapper script for $cmd"
174
175 mv $cmd $cmd.real
176 cmdname=`basename $cmd`
177 dirname=`dirname $cmd`
178 cmdoptions=$@
179 if [ "${base_prefix}" != "" ]; then
180 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
181 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
182 fi
183 cat <<END >$cmd
184#!/bin/bash
185realpath=\`readlink -fn \$0\`
186realdir=\`dirname \$realpath\`
187exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
188END
189 chmod +x $cmd
190}
191
192create_cmdline_shebang_wrapper () {
193 # Create a wrapper script where commandline options are needed
194 #
195 # These are useful to work around shebang relocation issues, where shebangs are too
196 # long or have arguments in them, thus preventing them from using the /usr/bin/env
197 # shebang
198 #
199 # Usage: create_cmdline_shebang_wrapper FILENAME <extra-options>
200
201 cmd=$1
202 shift
203
204 echo "Generating wrapper script for $cmd"
205
206 # Strip #! and get remaining interpreter + arg
207 argument="$(sed -ne 's/^#! *//p;q' $cmd)"
208 # strip the shebang from the real script as we do not want it to be usable anyway
209 tail -n +2 $cmd > $cmd.real
210 chown --reference=$cmd $cmd.real
211 chmod --reference=$cmd $cmd.real
212 rm -f $cmd
213 cmdname=$(basename $cmd)
214 dirname=$(dirname $cmd)
215 cmdoptions=$@
216 if [ "${base_prefix}" != "" ]; then
217 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
218 cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
219 fi
220 cat <<END >$cmd
221#!/usr/bin/env bash
222realpath=\`readlink -fn \$0\`
223realdir=\`dirname \$realpath\`
224exec -a \$realdir/$cmdname $argument \$realdir/$cmdname.real $cmdoptions "\$@"
225END
226 chmod +x $cmd
227}
228
229create_wrapper () {
230 # Create a wrapper script where extra environment variables are needed
231 #
232 # These are useful to work around relocation issues, by setting environment
233 # variables which point to paths in the filesystem.
234 #
235 # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
236
237 cmd=$1
238 shift
239
240 echo "Generating wrapper script for $cmd"
241
242 mv $cmd $cmd.real
243 cmdname=`basename $cmd`
244 dirname=`dirname $cmd`
245 exportstring=$@
246 if [ "${base_prefix}" != "" ]; then
247 relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
248 exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
249 fi
250 cat <<END >$cmd
251#!/bin/bash
252realpath=\`readlink -fn \$0\`
253realdir=\`dirname \$realpath\`
254export $exportstring
255exec -a "\$0" \$realdir/$cmdname.real "\$@"
256END
257 chmod +x $cmd
258}
259
260# Copy files/directories from $1 to $2 but using hardlinks
261# (preserve symlinks)
262hardlinkdir () {
263 from=$1
264 to=$2
265 (cd $from; find . -print0 | cpio --null -pdlu $to)
266}
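# Usage example (hypothetical paths):
#   hardlinkdir ${WORKDIR}/image ${SYSROOT_DESTDIR}
# populates the destination via hardlinks, costing no extra disk space while
# preserving symlinks as symlinks.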
267
268
269def check_app_exists(app, d):
270 app = d.expand(app).split()[0].strip()
271 path = d.getVar('PATH')
272 return bool(bb.utils.which(path, app))
273
274def explode_deps(s):
275 return bb.utils.explode_deps(s)
276
277def base_set_filespath(path, d):
278 filespath = []
279 extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
280 # Remove default flag which was used for checking
281 extrapaths = extrapaths.replace("__default:", "")
282 # Don't prepend empty strings to the path list
283 if extrapaths != "":
284 path = extrapaths.split(":") + path
285 # The ":" ensures we have an 'empty' override
286 overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
287 overrides.reverse()
288 for o in overrides:
289 for p in path:
290 if p != "":
291 filespath.append(os.path.join(p, o))
292 return ":".join(filespath)
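# Illustrative result (hypothetical values): with FILESOVERRIDES = "arm:qemuarm:poky"
# and path = ["/layer/recipe/files"], this returns
#   /layer/recipe/files/poky:/layer/recipe/files/qemuarm:/layer/recipe/files/arm:/layer/recipe/files/
# i.e. the highest-priority override directory is searched first.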
293
294def extend_variants(d, var, extend, delim=':'):
295 """Return a string of all bb class extend variants for the given extend"""
296 variants = []
297 whole = d.getVar(var) or ""
298 for ext in whole.split():
299 eext = ext.split(delim)
300 if len(eext) > 1 and eext[0] == extend:
301 variants.append(eext[1])
302 return " ".join(variants)
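# Illustrative use (hypothetical value): with BBCLASSEXTEND = "native multilib:lib32",
# extend_variants(d, "BBCLASSEXTEND", "multilib") returns "lib32".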
303
304def multilib_pkg_extend(d, pkg):
305 variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
306 if not variants:
307 return pkg
308 pkgs = pkg
309 for v in variants:
310 pkgs = pkgs + " " + v + "-" + pkg
311 return pkgs
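# Illustrative: with MULTILIB_VARIANTS = "lib32", multilib_pkg_extend(d, "foo")
# returns "foo lib32-foo".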
312
313def get_multilib_datastore(variant, d):
314 return oe.utils.get_multilib_datastore(variant, d)
315
316def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
317 """Return a string of all ${var} in all multilib tune configuration"""
318 values = []
319 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
320 for item in variants:
321 localdata = get_multilib_datastore(item, d)
322 # We need WORKDIR to be consistent with the original datastore
323 localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
324 value = localdata.getVar(var) or ""
325 if value != "":
326 if need_split:
327 for item in value.split(delim):
328 values.append(item)
329 else:
330 values.append(value)
331 if unique:
332 # we do this to preserve order as much as possible
333 ret = []
334 for value in values:
335 if value not in ret:
336 ret.append(value)
337 else:
338 ret = values
339 return " ".join(ret)
340
341def all_multilib_tune_list(vars, d):
342 """
343 Return a list of ${VAR} for each variable VAR in vars from each
344 multilib tune configuration.
345 Is safe to be called from a multilib recipe/context as it can
346 figure out the original tune and remove the multilib overrides.
347 """
348 values = {}
349 for v in vars:
350 values[v] = []
351 values['ml'] = ['']
352
353 variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
354 for item in variants:
355 localdata = get_multilib_datastore(item, d)
356 for v in vars: values[v].append(localdata.getVar(v))
357 values['ml'].append(item)
358 return values
359all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
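# Illustrative (hypothetical, with MULTILIB_VARIANTS = "lib32"):
#   all_multilib_tune_list(["TUNE_FEATURES"], d)
# returns {"TUNE_FEATURES": [<lib32 value>, <default value>], "ml": ["", "lib32", ""]}.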
360
361# If the user hasn't set up their name/email, set some defaults
362check_git_config() {
363 if ! git config user.email > /dev/null ; then
364 git config --local user.email "${PATCH_GIT_USER_EMAIL}"
365 fi
366 if ! git config user.name > /dev/null ; then
367 git config --local user.name "${PATCH_GIT_USER_NAME}"
368 fi
369}