summaryrefslogtreecommitdiffstats
path: root/meta/classes-global/sstate.bbclass
diff options
context:
space:
mode:
Diffstat (limited to 'meta/classes-global/sstate.bbclass')
-rw-r--r--meta/classes-global/sstate.bbclass1372
1 files changed, 0 insertions, 1372 deletions
diff --git a/meta/classes-global/sstate.bbclass b/meta/classes-global/sstate.bbclass
deleted file mode 100644
index 2fd29d7323..0000000000
--- a/meta/classes-global/sstate.bbclass
+++ /dev/null
@@ -1,1372 +0,0 @@
1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "14"
8
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    """Build the relative path of an sstate archive within SSTATE_DIR.

    The result is "<hash[0:2]>/<hash[2:4]>/<spec><hash>_<taskname><ext>".
    If the filename would exceed the filesystem name limit, the optional
    fields of the spec (fields 2-4) are truncated to fit.

    spec:     colon-separated package spec (SSTATE_PKGSPEC/SSTATE_SWSPEC)
    hash:     task unihash ("INVALID" is substituted when empty)
    taskname: sstate task name; returns "" when None
    siginfo:  True when naming a .siginfo file (larger length budget)
    d:        datastore (unused, kept for call-site compatibility)
    """
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            # Fixed typo: "chararacters" -> "characters"
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    # Two levels of hash-prefix subdirectories spread the archives out
    return hash[:2] + "/" + hash[2:4] + "/" + fn
40
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
49
50# explicitly make PV to depend on evaluated value of PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
54# of the system, we let the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
58# Avoid docbook/sgml catalog warnings for now
59SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
60# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
61SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
62SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
63# target-sdk-provides-dummy overlaps that allarch is disabled when multilib is used
64SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
65# Archive the sources for many architectures in one deploy folder
66SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
67# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
69SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
72
73SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
74SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
75SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
76SSTATE_HASHEQUIV_FILEMAP ?= " \
77 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
78 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
79 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_INCLUDE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
80 populate_sysroot:*/crossscripts/*:${TMPDIR} \
81 populate_sysroot:*/crossscripts/*:${COREBASE} \
82 "
83
84BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
85
86SSTATE_ARCHS_TUNEPKG ??= "${TUNE_PKGARCH}"
87SSTATE_ARCHS = " \
88 ${BUILD_ARCH} \
89 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
90 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
91 ${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX} \
93 allarch \
94 ${SSTATE_ARCHS_TUNEPKG} \
95 ${PACKAGE_EXTRA_ARCHS} \
96 ${MACHINE_ARCH}"
97SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
98
99SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
100
101SSTATECREATEFUNCS += "sstate_hardcode_path"
102SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
103SSTATEPOSTCREATEFUNCS = ""
104SSTATEPREINSTFUNCS = ""
105SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
106EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
107
108# Check whether sstate exists for tasks that support sstate and are in the
109# locked signatures file.
110SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
111
112# Check whether the task's computed hash matches the task's hash in the
113# locked signatures file.
114SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
115
116# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
117# not sign)
118SSTATE_SIG_KEY ?= ""
119SSTATE_SIG_PASSPHRASE ?= ""
120# Whether to verify the GnuPG signatures when extracting sstate archives
121SSTATE_VERIFY_SIG ?= "0"
122# List of signatures to consider valid.
123SSTATE_VALID_SIGS ??= ""
124SSTATE_VALID_SIGS[vardepvalue] = ""
125
126SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
127SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
128 the output hash for a task, which in turn is used to determine equivalency. \
129 "
130
131SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
132SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
133 hash equivalency server, such as PN, PV, taskname, etc. This information \
134 is very useful for developers looking at task data, but may leak sensitive \
135 data if the equivalence server is public. \
136 "
137
python () {
    # Parse-time hook: select the sstate package architecture namespace for
    # this recipe's class so native/cross/SDK archives do not collide.
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        # Target recipes keep the default SSTATE_PKGARCH but track manifests
        # per PACKAGE_ARCH.
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    # Build-host-dependent output: key the sstate path on the host distro
    # string so archives from incompatible hosts are not reused.
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    # Wire the sstate pre/post functions into every task listed in
    # SSTATETASKS (deduplicated and sorted for stable ordering).
    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        # Generally sstate should be last, except for buildhistory functions
        postfuncs = (d.getVarFlag(task, 'postfuncs') or "").split()
        newpostfuncs = [p for p in postfuncs if "buildhistory" not in p] + ["sstate_task_postfunc"] + [p for p in postfuncs if "buildhistory" in p]
        d.setVarFlag(task, 'postfuncs', " ".join(newpostfuncs))
        # Mirror fetches may need network access for both the real task and
        # its setscene variant.
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}
170
def sstate_init(task, d):
    """Return a fresh shared-state descriptor dictionary for *task*.

    The descriptor collects the directory mappings and lock files that the
    rest of the sstate machinery operates on; *d* is accepted for interface
    symmetry with the other sstate helpers.
    """
    return {
        'task': task,
        'dirs': [],
        'plaindirs': [],
        'lockfiles': [],
        'lockfiles-shared': [],
    }
179
def sstate_state_fromvars(d, task = None):
    """Build the shared-state descriptor for *task* from datastore varflags.

    Reads the 'sstate-inputdirs'/'sstate-outputdirs'/'sstate-plaindirs'/
    'sstate-lockfile'/'sstate-lockfile-shared'/'sstate-fixmedir' flags of
    do_<task> and returns the populated descriptor from sstate_init().
    Calls bb.fatal() if no task context exists or if the input/output
    directory lists are mismatched.
    """
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
    # Normalise: setscene variants and the do_ prefix map to the base task.
    task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    # populate_lic archives are machine/arch independent, so switch to the
    # software-only spec and drop the host-specific path component.
    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['fixmedir'] = fixmedir
    return ss
211
def sstate_add(ss, source, dest, d):
    """Append a [basename, source, dest] directory mapping to ss['dirs'].

    Both paths are normalised (trailing slashes added then collapsed by
    os.path.normpath); *d* is unused but kept for interface consistency.
    Returns the descriptor *ss* for chaining.
    """
    src = source if source.endswith("/") else source + "/"
    dst = dest if dest.endswith("/") else dest + "/"
    src = os.path.normpath(src)
    dst = os.path.normpath(dst)
    ss['dirs'].append([os.path.basename(src), src, dst])
    return ss
222
def sstate_install(ss, d):
    """Install staged sstate output into the shared areas.

    Moves the unpacked/built trees from SSTATE_INSTDIR into their final
    locations, checks for file overlaps with already-staged recipes, writes
    the per-task manifest (files first, directories last, deepest first),
    records the manifest in the per-arch index, then hardlink-copies the
    content into place.  Fatal on overlap conflicts or double-staging.
    """
    import oe.path
    import oe.sstatesig
    import subprocess

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    sstateinst = d.getVar("SSTATE_INSTDIR")

    # Move each staged tree from the install dir to its source location
    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    # An existing, readable manifest means this task was already staged.
    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    # Enumerate everything that will land in the shared areas.  Symlinked
    # directories are tracked as files so they are removed, not recursed.
    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                # Search the existing manifests to name the offending task
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=index-* | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append(" (not matched to any task)")
    if match:
        bb.fatal("Recipe %s is trying to install files into a shared " \
            "area when those files already exist. The files and the manifests listing " \
            "them are:\n %s\n"
            "Please adjust the recipes so only one recipe provides a given file. " % \
            (d.getVar('PN'), "\n ".join(match)))

    # Fixme relocation helpers must be removed along with everything else.
    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    with open(manifest, "w") as f:
        for file in sharedfiles:
            f.write(file + "\n")

        # We want to ensure that directories appear at the end of the manifest
        # so that when we test to see if they should be deleted any contents
        # added by the task will have been removed first.
        dirs = sorted(shareddirs, key=len)
        # Must remove children first, which will have a longer path than the parent
        for di in reversed(dirs):
            f.write(di + "\n")

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries, we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    # Plain dirs are moved back to their recorded location verbatim.
    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    for lock in locks:
        bb.utils.unlockfile(lock)
355
356sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX STAMP"
357
def sstate_installpkg(ss, d):
    """Fetch, verify, unpack and install the sstate archive for *ss*.

    Returns True when the package was installed (task can be accelerated),
    False when no usable archive exists or its signature cannot be
    verified.
    """
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')
    verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)

    # Try the mirrors if the archive (or its required signature) is missing
    if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    # Remove any previous install of this task before unpacking
    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if verify_sig:
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty sstateinst directory, ensure its clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)
400
def sstate_installpkgdir(ss, d):
    """Run the post-unpack hooks and install the unpacked sstate content.

    Executes every function in SSTATEPOSTUNPACKFUNCS with SSTATE_INSTDIR
    as working directory, then hands over to sstate_install().  Always
    returns True.
    """
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    hooks = (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split()
    for funcname in hooks:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(funcname, d, (sstateinst,))

    sstate_install(ss, d)

    return True
415
python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    #
    # The fixmepath file (written at packaging time) lists, relative to the
    # archive root, every file containing FIXMESTAGINGDIR* placeholders;
    # here those placeholders are substituted back to real sysroot paths.
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        # Which placeholders apply depends on the recipe class (see the
        # mirrored class checks in sstate_hardcode_path).
        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            # Instead of running now, save a templated command (with
            # FIXMEFINALSSTATE* placeholders) so relocation can happen when
            # the final install location is known.
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}
464
def sstate_clean_cachefile(ss, d):
    """Delete the cached sstate archives matching this task's pathspec.

    Only acts when do_<task> is a real task in the datastore; removal is
    done via the SSTATE_PATHSPEC glob (covers .siginfo files too).
    """
    import oe.path

    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)
473
def sstate_clean_cachefiles(d):
    """Remove cached sstate archives for every task listed in SSTATETASKS.

    Each task is cleaned against its own copy of the datastore so that the
    per-task variable tweaks made by sstate_state_fromvars() do not leak.
    """
    tasks = (d.getVar('SSTATETASKS') or "").split()
    for taskname in tasks:
        localdata = d.createCopy()
        state = sstate_state_fromvars(localdata, taskname)
        sstate_clean_cachefile(state, localdata)
479
def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    """Remove every file/directory listed in *manifest*, then the manifest.

    Entries ending in "/" are directories (removed only when empty and
    only when canrace is False, since concurrent builds may repopulate
    them); other entries are files/symlinks.  A sibling <manifest>.postrm
    script, if present, is executed and removed afterwards.

    prefix: optional path prepended to relative manifest entries.
    """
    import oe.path

    with open(manifest) as mfile:
        entries = mfile.readlines()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)
514
def sstate_clean(ss, d):
    """Remove a task's installed sstate output and its stamps.

    Cleans the files recorded in the task's manifest (holding the task's
    lock files while doing so), then deletes the matching stamp files,
    preserving sigdata/sigbasedata and .taint files.
    """
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        # NOTE(review): both shared and exclusive lockfiles are taken
        # exclusively here (no shared=True) — presumably intentional since
        # we are deleting; confirm against bb.utils.lockfile semantics.
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)
561
562sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
563
564CLEANFUNCS += "sstate_cleanall"
565
python sstate_cleanall() {
    # Clean the installed shared state for every sstate-enabled task of
    # this recipe.  Hooked into CLEANFUNCS, so it runs on do_clean.
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        # Use a datastore copy so per-task tweaks don't leak between tasks
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}
579
python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    # Build the grep filter (which files to touch) and the sed command
    # (placeholder substitutions) appropriate for this recipe's class.
    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    # tee both records the touched files (fixmepath) and forwards them to sed
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    # GNU xargs needs --no-run-if-empty; BSD xargs (Darwin) lacks the flag
    # but already behaves that way.
    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
635
def sstate_package(ss, d):
    """Collect a task's output into SSTATE_BUILDDIR and create the archive.

    Moves the task's output trees (rejecting absolute symlinks that point
    into TMPDIR) and plain dirs into a staging area, runs the create/sign
    function chain, and writes/refreshes the .siginfo next to the package.
    """
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    # NOTE(review): 'sde' is not used later in this function; it may be
    # retained so SOURCE_DATE_EPOCH stays in this function's vardeps —
    # confirm before removing.
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    exit = False
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but its not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
                exit = True
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])
    if exit:
        bb.fatal("Failing task due to absolute path symlinks")

    # Plain dirs move into the staging area keyed on their workdir-relative
    # (or work-shared-relative) path.
    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_and_sign_package']

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
            sstate_create_package + \
            (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return
712
713sstate_package[vardepsexclude] += "SSTATE_SIG_KEY SSTATE_PKG"
714
def pstaging_fetch(sstatefetch, d):
    """Try to fetch an sstate archive (and siginfo/sig) from SSTATE_MIRRORS.

    Downloads directly into SSTATE_DIR using a copied datastore; fetch
    failures are silently ignored — the caller falls back to building.
    """
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    # Mirrors are consulted as premirrors of the plain file:// URI below
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    # Create the download dir with group-shared permissions
    with bb.utils.umask(bb.utils.to_filemode(d.getVar("OE_SHARED_UMASK"))):
        bb.utils.mkdirhier(dldir)

    for srcuri in uris:
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            # Best-effort: absence of a mirror copy is not an error
            pass
759
def sstate_setscene(d):
    """Setscene entry point: install the task's sstate archive or bail out.

    Raises bb.BBHandledException (after a warning) when no archive could be
    installed, which makes BitBake fall back to running the real task.
    """
    ss = sstate_state_fromvars(d)
    if sstate_installpkg(ss, d):
        return
    message = "No sstate archive obtainable, will run full task instead."
    bb.warn(message)
    raise bb.BBHandledException(message)
767
python sstate_task_prefunc () {
    # Before an sstate-enabled task runs, remove any previously installed
    # output and stamps for it so stale files never survive a re-run.
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
772sstate_task_prefunc[dirs] = "${WORKDIR}"
773
python sstate_task_postfunc () {
    # After an sstate-enabled task: package its output, then immediately
    # reinstall it into the shared area, and drop the staging directory.
    shared_state = sstate_state_fromvars(d)

    # Package under the shared umask so group builds can reuse the files
    shared_umask = bb.utils.to_filemode(d.getVar("OE_SHARED_UMASK"))
    omask = os.umask(shared_umask)
    if omask != shared_umask:
        bb.note("Using umask %0o (not %0o) for sstate packaging" % (shared_umask, omask))
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
791sstate_task_postfunc[dirs] = "${WORKDIR}"
792
793# Create a sstate package
794# If enabled, sign the package.
795# Package and signature are created in a sub-directory
796# and renamed in place once created.
python sstate_create_and_sign_package () {
    # Create the sstate archive (and, if configured, its detached GPG
    # signature) in a temporary location and atomically move/link both
    # into place, so concurrent writers never observe a partial package.
    from pathlib import Path

    # Best effort touch
    def touch(file):
        try:
            file.touch()
        except:
            pass

    # Move/link src into place as dst.  Without force, hardlink (fails if
    # dst already exists — another writer won); with force, rename over.
    # Returns True when dst was updated by us.
    def update_file(src, dst, force=False):
        if dst.is_symlink() and not dst.exists():
            force=True
        try:
            # This relies on that src is a temporary file that can be renamed
            # or left as is.
            if force:
                src.rename(dst)
            else:
                os.link(src, dst)
            return True
        except:
            pass

        if dst.exists():
            touch(dst)

        return False

    # Sign only when verification is on and a key is configured
    sign_pkg = (
        bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG")) and
        bool(d.getVar("SSTATE_SIG_KEY"))
    )

    sstate_pkg = Path(d.getVar("SSTATE_PKG"))
    sstate_pkg_sig = Path(str(sstate_pkg) + ".sig")
    # If everything required already exists, just refresh the mtimes
    if sign_pkg:
        if sstate_pkg.exists() and sstate_pkg_sig.exists():
            touch(sstate_pkg)
            touch(sstate_pkg_sig)
            return
    else:
        if sstate_pkg.exists():
            touch(sstate_pkg)
            return

    # Create the required sstate directory if it is not present.
    if not sstate_pkg.parent.is_dir():
        shared_umask = bb.utils.to_filemode(d.getVar("OE_SHARED_UMASK"))
        with bb.utils.umask(shared_umask):
            bb.utils.mkdirhier(str(sstate_pkg.parent))

    if sign_pkg:
        from tempfile import TemporaryDirectory
        with TemporaryDirectory(dir=sstate_pkg.parent) as tmp_dir:
            tmp_pkg = Path(tmp_dir) / sstate_pkg.name
            sstate_archive_package(tmp_pkg, d)

            from oe.gpg_sign import get_signer
            signer = get_signer(d, 'local')
            signer.detach_sign(str(tmp_pkg), d.getVar('SSTATE_SIG_KEY'), None,
                               d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)

            tmp_pkg_sig = Path(tmp_dir) / sstate_pkg_sig.name
            if not update_file(tmp_pkg_sig, sstate_pkg_sig):
                # If the created signature file could not be copied into place,
                # then we should not use the sstate package either.
                return

            # If the .sig file was updated, then the sstate package must also
            # be updated.
            update_file(tmp_pkg, sstate_pkg, force=True)
    else:
        from tempfile import NamedTemporaryFile
        with NamedTemporaryFile(prefix=sstate_pkg.name, dir=sstate_pkg.parent) as tmp_pkg_fd:
            tmp_pkg = tmp_pkg_fd.name
            sstate_archive_package(tmp_pkg, d)
            update_file(tmp_pkg, sstate_pkg)
            # update_file() may have renamed tmp_pkg, which must exist when the
            # NamedTemporaryFile() context handler ends.
            touch(Path(tmp_pkg))

}
880
881# Function to generate a sstate package from the current directory.
882# The calling function handles moving the sstate package into the final
883# destination.
def sstate_archive_package(sstate_pkg, d):
    """Pack the current working directory into *sstate_pkg*.

    Produces a zstd-compressed (pzstd) tarball; the caller is responsible
    for moving the archive into its final destination.
    """
    import subprocess

    compressor = d.expand("pzstd -${SSTATE_ZSTD_CLEVEL} -p${ZSTD_THREADS}")

    # tar refuses to create an empty archive unless told explicitly
    contents = sorted(os.listdir(".")) or ["--files-from=/dev/null"]

    tar_cmd = ["tar", "-I", compressor, "-cS", "-f", sstate_pkg] + contents
    try:
        subprocess.run(tar_cmd, check=True)
    except subprocess.CalledProcessError as e:
        # Ignore error 1 as this is caused by files changing
        # (link count increasing from hardlinks being created).
        if e.returncode != 1:
            raise

    os.chmod(sstate_pkg, 0o664)
908
909
# Report this task's output hash to the hash-equivalence machinery, if the
# active signature generator provides a report_unihash hook.
python sstate_report_unihash() {
    report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)

    if report_unihash:
        ss = sstate_state_fromvars(d)
        # The task output to hash is the current working directory contents.
        report_unihash(os.getcwd(), ss['task'], d)
}
917
#
# Shell function to decompress and prepare a package for installation
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
	ZSTD="zstd -T${ZSTD_THREADS}"
	# Use pzstd if available
	if [ -x "$(command -v pzstd)" ]; then
		ZSTD="pzstd -p ${ZSTD_THREADS}"
	fi

	tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
	# The touches below refresh timestamps so atime-based cache cleanup on a
	# local/NFS mirror does not prune objects that are still in use.
	# update .siginfo atime on local/NFS mirror if it is a symbolic link
	[ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
	# update each symbolic link instead of any referenced file
	touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
	[ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
	[ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
937
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    """Return the set of task ids from sq_data whose sstate objects exist.

    Checks the local SSTATE_DIR first, then probes SSTATE_MIRRORS (if set)
    with the fetcher's checkstatus in a thread pool. Optionally fires a
    toaster MetadataEvent and prints a reuse summary.
    """
    import itertools

    found = set()
    missed = set()

    def gethash(task):
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        # Machine-independent early tasks use the shared-work spec instead.
        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    def getsstatefile(tid, siginfo, d):
        spec, extrapath, tname = getpathcomponents(tid, d)
        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)

    # Pass 1: look for each object in the local sstate directory.
    for tid in sq_data['hash']:

        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))

        if os.path.exists(sstatefile):
            # Refresh atime so atime-based cache pruning spares this object.
            oe.utils.touch(sstatefile)
            found.add(tid)
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    foundLocal = len(found)
    # Pass 2: test the remaining objects against the configured mirrors.
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        # Pool of reusable fetcher connection caches, one per worker thread.
        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init():
            while not connection_cache_pool.full():
                connection_cache_pool.put(FetchConnectionCache())

        def checkstatus_end():
            while not connection_cache_pool.empty():
                connection_cache = connection_cache_pool.get()
                connection_cache.close_connections()

        # Worker: probe one (tid, sstatefile) pair against the mirrors and
        # move the tid from missed to found on success. Captures
        # connection_cache_pool / progress / msg / cnt_tasks_done from the
        # enclosing scope, which are bound before the pool runs.
        def checkstatus(arg):
            (tid, sstatefile) = arg

            connection_cache = connection_cache_pool.get()
            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            import traceback

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                        connection_cache=connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                found.add(tid)
                missed.remove(tid)
            except bb.fetch2.FetchError as e:
                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
            except Exception as e:
                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))

            connection_cache_pool.put(connection_cache)

            if progress:
                bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
                bb.event.check_for_interrupts()

        tasklist = []
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))

            ## thread-safe counter
            cnt_tasks_done = itertools.count(start = 1)
            # Only show progress for large mirror scans.
            progress = len(tasklist) >= 100
            if progress:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            # Have to setup the fetcher environment here rather than in each thread as it would race
            fetcherenv = bb.fetch2.get_fetcher_environment(d)
            with bb.utils.environment(**fetcherenv):
                bb.event.enable_threadlock()
                import concurrent.futures
                from queue import Queue
                connection_cache_pool = Queue(nproc)
                checkstatus_init()
                with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
                    executor.map(checkstatus, tasklist.copy())
                checkstatus_end()
                bb.event.disable_threadlock()

            if progress:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []};
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        for tid in found:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
            (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT _SSTATE_EXCLUDEDEPS_SYSROOT"

BB_SETSCENE_DEPVALID = "setscene_depvalid"

def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too
    # Return - False - We need this dependency
    #        - True - We can skip this dependency
    # NOTE: rule order below is significant — earlier checks deliberately
    # shadow later ones; do not reorder.
    import re

    # Log to the caller-supplied list when given, else to bb.debug.
    def logit(msg, log):
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]

    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # We only need to trigger deploy_source_date_epoch through direct dependencies
    if taskdependees[task][1] in directtasks:
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    for dep in taskdependees:
        logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package doesn't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                # NOTE(review): each SSTATE_EXCLUDEDEPS_SYSROOT entry is assumed
                # to contain '->'; a malformed entry without it raises IndexError
                # here — confirm whether that should be a bb.fatal instead.
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[dep][1] in directtasks:
            continue

        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True
1199
# After every successful task, ensure an sstate .siginfo exists for it even
# when the task itself produced no sstate package (SSTATE_CURRTASK unset),
# so signature data is available for hash debugging/equivalence.
addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        # Strip the "do_" prefix from the task name.
        taskname = d.getVar("BB_RUNTASK")[3:]
        spec = d.getVar('SSTATE_PKGSPEC')
        swspec = d.getVar('SSTATE_SWSPEC')
        # Machine-independent early tasks use the shared-work spec/path.
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            # Refresh atime so atime-based cache cleanup spares the file.
            oe.utils.touch(siginfo)
}
1219
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"

#
# Event handler which removes manifests and stamps file for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed so recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit where recipes
# became skipped would be another example.
#
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler_reachablestamps
sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler_reachablestamps() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            # NOTE(review): readlines() keeps trailing newlines, so matching
            # below assumes stamp entries are written the same way — confirm.
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        manseen = set()
        ignore = []
        with open(i, "r") as f:
            lines = f.readlines()
            # Walk newest-first so the last (valid) entry per manifest wins.
            for l in reversed(lines):
                try:
                    (stamp, manifest, workdir) = l.split()
                    # The index may have multiple entries for the same manifest as the code above only appends
                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
                    # The last entry in the list is the valid one, any earlier entries with matching manifests
                    # should be ignored.
                    if manifest in manseen:
                        ignore.append(l)
                        continue
                    manseen.add(manifest)
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    # .postrm scripts are kept for package-management cleanup.
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts()

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        # Rewrite the index without the removed and duplicate entries.
        with open(i, "w") as f:
            for l in lines:
                if l in ignore:
                    continue
                f.write(l)
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}
1316
1317
#
# Bitbake can generate an event showing which setscene tasks are 'stale',
# i.e. which ones will be rerun. These are ones where a stamp file is present but
# it is stable (e.g. taskhash doesn't match). With that list we can go through
# the manifests for matching tasks and "uninstall" those manifests now. We do
# this now rather than mid build since the distribution of files between sstate
# objects may have changed, new tasks may run first and if those new tasks overlap
# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
# removing these files is fast.
#
addhandler sstate_eventhandler_stalesstate
sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
python sstate_eventhandler_stalesstate() {
    d = e.data
    # e.tasks maps each stale task id to its stamp file names.
    tasks = e.tasks

    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    for a in list(set(d.getVar("SSTATE_ARCHS").split())):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                try:
                    (stamp, manifest, workdir) = l.split()
                    # Match index entries against the stale tasks' stamps.
                    for tid in tasks:
                        for s in tasks[tid]:
                            if s.startswith(stamp):
                                taskname = bb.runqueue.taskname_from_tid(tid)[3:]
                                manname = manifest + "." + taskname
                                if os.path.exists(manname):
                                    bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
                                    toremove.append((manname, tid, tasks[tid]))
                                    break
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for (manname, tid, stamps) in toremove:
                # Uninstall the manifest's files and drop the stale stamps so
                # the tasks rerun cleanly without overlapping-files errors.
                sstate_clean_manifest(manname, d)
                for stamp in stamps:
                    bb.utils.remove(stamp)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts()

            bb.event.fire(bb.event.ProcessFinished(msg), d)
}