summaryrefslogtreecommitdiffstats
path: root/meta/classes/sstate.bbclass
diff options
context:
space:
mode:
Diffstat (limited to 'meta/classes/sstate.bbclass')
-rw-r--r--meta/classes/sstate.bbclass1225
1 file changed, 0 insertions(+), 1225 deletions(-)
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
deleted file mode 100644
index f579168162..0000000000
--- a/meta/classes/sstate.bbclass
+++ /dev/null
@@ -1,1225 +0,0 @@
# Format version of the sstate layout; bumping this invalidates every
# previously generated sstate object (it is part of SSTATE_PKGSPEC below).
SSTATE_VERSION = "3"

# Directory holding the per-recipe manifests of files installed from sstate.
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
# Manifest filename prefix for this recipe; the task name is appended where used.
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
5
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    """Build the relative path of an sstate archive.

    spec     -- colon-separated name prefix (SSTATE_PKGSPEC/SSTATE_SWSPEC)
    hash     -- task (uni)hash; falsy values are replaced with "INVALID"
    taskname -- sstate task name; None yields an empty string
    siginfo  -- True when naming the .siginfo companion file
    d        -- datastore (unused here, kept for the bbclass call signature)

    Returns "xx/yy/<name>" where xx/yy are the first two hash byte pairs,
    used to shard archives into subdirectories. If the flat name exceeds
    the filename length limit, the informational spec fields (2,3,4) are
    truncated to make it fit; a fatal error is raised if that still fails.
    """
    if taskname is None:
        return ""
    extension = ".tgz"
    # 8 chars reserved for the ".siginfo" suffix of the companion file
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tgz.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        # Budget against the active limit (not a hard-coded 254) so the
        # non-siginfo reservation is honoured and the reduced name always
        # fits when a fit is possible.
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn
32
# Architecture field used inside sstate package names; remapped for
# native/cross/crosssdk/nativesdk/allarch recipes by the anonymous
# python function further down.
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
# Machine-specific, colon-separated package name prefix.
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
# Machine-independent spec (arch fields left empty), used for populate_lic.
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
# Final archive name: optional extra path + hash-sharded name from generate_sstatefn().
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
# Extra path component (set to ${NATIVELSBSTRING}/ for native/cross recipes below).
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
# Glob matching every archive (and companion file) for the current task;
# used when cleaning cache files.
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"

# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
49
# Paths where overlapping installs from different recipes are tolerated by
# sstate_install()'s duplicate-file check.
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps that allarch is disabled when multilib is used
SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"

# File name patterns likely to contain hardcoded paths; scanned by
# sstate_hardcode_path() before packaging.
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'

BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

# Every architecture/variant suffix an sstate object may be stored under;
# consumers iterate this list when searching/cleaning the cache.
SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${BUILD_ARCH}_${TARGET_ARCH} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${PACKAGE_ARCH} \
    allarch \
    ${PACKAGE_ARCH} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

# Hook lists run while creating/installing sstate archives; see
# sstate_package() and sstate_installpkg()/sstate_installpkgdir().
SSTATECREATEFUNCS = "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnUPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "
122
# Anonymous function, runs at parse time: pick the right SSTATE_PKGARCH for
# the recipe's class (native/cross/SDK variants), enable the distro-specific
# extra path for host-dependent recipes, and attach the sstate pre/post
# functions to every task listed in SSTATETASKS.
python () {
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
        if d.getVar("PN") == "pseudo-native":
            # pseudo-native is additionally keyed on the original host distro
            d.appendVar('SSTATE_PKGARCH', '_${ORIGNATIVELSBSTRING}')
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        # Plain target recipe: manifests are tracked per PACKAGE_ARCH
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        # Host-dependent output lives under a NATIVELSBSTRING subdirectory
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
}
152
def sstate_init(task, d):
    """Return a fresh shared-state descriptor dict for *task*.

    The datastore *d* is accepted for call-signature symmetry with the
    other sstate helpers but is not consulted here.
    """
    return {
        'task': task,
        'dirs': [],
        'plaindirs': [],
        'lockfiles': [],
        'lockfiles-shared': [],
    }
161
def sstate_state_fromvars(d, task = None):
    """Build the sstate descriptor for *task* from the task's varflags.

    If task is None it is derived from BB_CURRENTTASK (with any
    "_setscene" suffix stripped). Reads the sstate-* varflags of
    "do_<task>" and returns the dict produced by sstate_init() populated
    with input/output dir pairs, lockfiles, plaindirs, intercept
    functions and the fixme dir. Fatal if input/output counts mismatch.
    """
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        # License output is architecture independent: switch to the
        # machine-independent spec and drop the distro extra path.
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    ss['fixmedir'] = fixmedir
    return ss
195
def sstate_add(ss, source, dest, d):
    """Record a source->dest directory mapping on the state dict *ss*.

    Both paths are normalised (redundant separators and trailing slashes
    removed) and stored together with the basename of the source
    directory. Returns *ss* so calls can be chained.
    """
    def _normdir(path):
        # Append a slash first so a path like "/a/b" and "/a/b/" normalise
        # identically, then let normpath strip it again.
        if not path.endswith("/"):
            path += "/"
        return os.path.normpath(path)

    src = _normdir(source)
    dst = _normdir(dest)
    ss['dirs'].append([os.path.basename(src), src, dst])
    return ss
206
def sstate_install(ss, d):
    """Stage the task's sstate output dirs into their shared destinations.

    Walks each source dir in ss['dirs'], checks the resulting file list for
    conflicts with files already present in the shared areas (modulo
    SSTATE_DUPWHITELIST), writes the per-task manifest (files first, then
    directories), updates the per-arch manifest index, hardlink-copies the
    trees into place and finally runs SSTATEPOSTINSTFUNCS. All of this is
    done under the task's configured lockfiles.
    """
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    # An existing manifest means the previous install was never cleaned up.
    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    # Collect the destination paths of everything about to be installed.
    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    # Symlinked dirs are tracked as files in the manifest.
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f) and not os.path.islink(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in whitelist:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                # Search the existing manifests to name the task that owns
                # the conflicting file (best effort, for the error message).
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append(" (not matched to any task)")
    if match:
        bb.error("The recipe %s is trying to install files into a shared " \
          "area when those files already exist. Those files and their manifest " \
          "location are:\n %s\nPlease verify which recipe should provide the " \
          "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
          "break things - if not now, possibly in the future (we've seen builds fail " \
          "several months later). If the system knew how to recover from this " \
          "automatically it would, however there are several different scenarios " \
          "which can result in this and we don't know which one this is. It may be " \
          "you have switched providers of something like virtual/kernel (e.g. from " \
          "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
          "clean task for both recipes and it will resolve this error. It may be " \
          "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
          "those recipes should again resolve this error, however switching " \
          "DISTRO_FEATURES on an existing build directory is not supported - you " \
          "should really clean out tmp and rebuild (reusing sstate should be safe). " \
          "It could be the overlapping files detected are harmless in which case " \
          "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
          "also be your build is including two different conflicting versions of " \
          "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
          "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
          "sharing the error and filelist above." % \
          (d.getVar('PN'), "\n ".join(match)))
        bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)
338
# Keep path/whitelist variables out of the task signature so relocating the
# build directory does not invalidate existing sstate.
# NOTE(review): "STATE_MANMACH" below looks like a typo for "SSTATE_MANMACH";
# as written it excludes a variable that does not exist - confirm intent.
sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
341
def sstate_installpkg(ss, d):
    """Fetch, verify and unpack the sstate archive for this task.

    Returns False (task must be rebuilt) when the archive cannot be found
    or its GPG signature cannot be verified; otherwise unpacks it into
    SSTATE_INSTDIR via SSTATEPREINSTFUNCS + sstate_unpack_package and
    hands off to sstate_installpkgdir().
    """
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')

    # Not present locally: try the configured sstate mirrors.
    if not os.path.exists(sstatepkg):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig'):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty sstateinst directory, ensure its clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)
383
def sstate_installpkgdir(ss, d):
    """Move the unpacked sstate contents from SSTATE_INSTDIR into place.

    Runs SSTATEPOSTUNPACKFUNCS, renames each unpacked tree back to its
    original source location, re-runs sstate_install() to stage it into
    the shared areas, and restores any plain WORKDIR/work-shared dirs.
    Always returns True.
    """
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    for state in ss['dirs']:
        prepdir(state[1])
        os.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        # Plain dirs were packaged relative to WORKDIR (or work-shared).
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        os.rename(src, dest)

    return True
419
python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Rewrites the FIXMESTAGINGDIR* placeholders (inserted at packaging time
    # by sstate_hardcode_path) back to the current sysroot paths, driven by
    # the "fixmepath" file listing the affected files.
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        # Which placeholders apply depends on the recipe class, mirroring
        # the substitutions made when the archive was created.
        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                # Store a relocatable command with FIXMEFINAL* placeholders so
                # the substitution can be replayed at final install time.
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}
468
def sstate_clean_cachefile(ss, d):
    """Delete the cached sstate archives matching this task's SSTATE_PATHSPEC."""
    import oe.path

    # Only act if the task actually exists for this recipe.
    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)
477
def sstate_clean_cachefiles(d):
    """Remove the cached sstate archives for every task in SSTATETASKS."""
    tasks = (d.getVar('SSTATETASKS') or "").split()
    for taskname in tasks:
        localdata = d.createCopy()
        state = sstate_state_fromvars(localdata, taskname)
        sstate_clean_cachefile(state, localdata)
483
def sstate_clean_manifest(manifest, d, prefix=None):
    """Remove every file/dir listed in *manifest*, then the manifest itself.

    Entries ending in "/" are directories and are only removed when empty
    (the manifest lists files first, dirs last - see sstate_install()).
    Relative entries are resolved against *prefix* when given. A
    "<manifest>.postrm" script, if present, is executed and removed too.
    """
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)
517
def sstate_clean(ss, d):
    """Undo a previous sstate install of this task.

    Removes the files recorded in the task's manifest (under the task's
    lockfiles) and deletes the task's stamps, keeping sigdata/sigbasedata
    and .taint files.
    """
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

# Manifest paths live under TMPDIR and must not influence task signatures.
sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
566
# Run sstate_cleanall as part of the recipe's clean task.
CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    # Remove the installed shared-state output of every sstate task of
    # this recipe (manifests, staged files and stamps).
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        # Use a datastore copy because sstate_state_fromvars may modify it
        # (e.g. for populate_lic).
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}
582
python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Scans the files matched by SSTATE_SCAN_CMD for the recipe sysroot
    # paths, replaces them with FIXMESTAGINGDIR* placeholders, and records
    # the list of touched files (relative paths) in a "fixmepath" file so
    # sstate_hardcode_path_unpack can reverse the substitution.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    # GNU xargs flag; macOS xargs skips empty input by default.
    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
638
def sstate_package(ss, d):
    """Assemble the task output into SSTATE_BUILDDIR and create the archive.

    Moves each sstate output dir (and plain dirs) into a per-task build
    directory, then runs SSTATECREATEFUNCS, sstate_report_unihash,
    sstate_create_package (plus signing when SSTATE_SIG_KEY is set) and
    SSTATEPOSTCREATEFUNCS, and finally writes/refreshes the .siginfo file.
    """
    import oe.path

    tmpdir = d.getVar('TMPDIR')

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        # NOTE(review): srcbase is unused below - confirm whether it can go.
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but its not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        os.rename(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        # Plain dirs are stored relative to WORKDIR (or work-shared).
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        os.rename(plain, pdir)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
    if d.getVar('SSTATE_SIG_KEY'):
        sstate_create_package.append('sstate_sign_package')

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        # Refresh the timestamp; read-only mirrors may refuse this.
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass

    return
706
def pstaging_fetch(sstatefetch, d):
    """Try to download an sstate archive (and companions) from SSTATE_MIRRORS.

    Fetch failures are deliberately swallowed: the caller falls back to
    building the task from scratch.
    """
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass
748
def sstate_setscene(d):
    """Setscene entry point: install this task from sstate or abort."""
    if not sstate_installpkg(sstate_state_fromvars(d), d):
        bb.fatal("No suitable staging package found")
754
python sstate_task_prefunc () {
    # Runs before every sstate task: remove any previously installed
    # output of this task so the fresh run starts clean.
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"
760
python sstate_task_postfunc () {
    # Runs after every sstate task: run intercept hooks, package the
    # output into an sstate archive, then install it into the shared
    # areas and remove the temporary build directory.
    shared_state = sstate_state_fromvars(d)

    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))

    # Package with a 0o002 umask so the shared cache stays group-writable.
    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"
781
782
#
# Shell function to generate a sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
	# Exit early if it already exists
	if [ -e ${SSTATE_PKG} ]; then
		# Refresh the timestamp if writable (read-only mirrors are left alone)
		[ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
		return
	fi

	mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
	# Build into a temp file first so a partial archive is never visible
	TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`

	# Use pigz if available
	OPT="-czS"
	if [ -x "$(command -v pigz)" ]; then
		OPT="-I pigz -cS"
	fi

	# Need to handle empty directories
	if [ "$(ls -A)" ]; then
		set +e
		tar $OPT -f $TFILE *
		ret=$?
		# tar exit 1 means "files changed while reading" - tolerated
		if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
			exit 1
		fi
		set -e
	else
		tar $OPT --file=$TFILE --files-from=/dev/null
	fi
	chmod 0664 $TFILE
	# Skip if it was already created by some other process
	if [ ! -e ${SSTATE_PKG} ]; then
		# Move into place using ln to attempt an atomic op.
		# Abort if it already exists
		ln $TFILE ${SSTATE_PKG} && rm $TFILE
	else
		rm $TFILE
	fi
	[ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
}
826
python sstate_sign_package () {
    # Create a detached GPG signature (SSTATE_PKG.sig) for the sstate
    # archive using the local signing backend.
    from oe.gpg_sign import get_signer

    signer = get_signer(d, 'local')
    pkg = d.getVar('SSTATE_PKG')
    sigfile = pkg + '.sig'
    # Drop any stale signature before writing a fresh one.
    if os.path.exists(sigfile):
        os.unlink(sigfile)
    signer.detach_sign(pkg, d.getVar('SSTATE_SIG_KEY', False), None,
                       d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}
838
python sstate_report_unihash() {
    # Forward this task's unified hash to the signature generator,
    # if the active siggen implements report_unihash at all.
    reporter = getattr(bb.parse.siggen, 'report_unihash', None)
    if reporter is not None:
        ss = sstate_state_fromvars(d)
        reporter(os.getcwd(), ss['task'], d)
}
846
847#
848# Shell function to decompress and prepare a package for installation
849# Will be run from within SSTATE_INSTDIR.
850#
sstate_unpack_package () {
    # Unpack the archive into the current directory (run from SSTATE_INSTDIR)
    tar -xvzf ${SSTATE_PKG}
    # update .siginfo atime on local/NFS mirror
    # NOTE(review): only fires when .siginfo is an owned, writable symlink —
    # presumably the symlink-into-mirror case; confirm intent of the -h test.
    [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
    # Use "! -w ||" to return true for read only files
    # Refresh mtimes of the package and its signature files so that
    # modification-time based cache cleanup does not remove in-use objects.
    [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
    [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
    [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
}
860
861BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
862
def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    """BB_HASHCHECK_FUNCTION implementation.

    For each candidate setscene task in sq_data, determine whether a matching
    sstate object exists, first in the local SSTATE_DIR and then (for the
    remainder) on any configured SSTATE_MIRRORS, probed in parallel.

    Returns the set of task ids (tids) for which an object was found.
    Optionally prints a summary line and fires toaster metadata events.
    """
    found = set()
    foundLocal = set()
    foundNet = set()
    missed = set()

    def gethash(task):
        # The unihash is what names the sstate object
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        # Machine-independent tasks use the alternate (software-only) spec
        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    # First pass: look in the local sstate directory.
    for tid in sq_data['hash']:

        spec, extrapath, tname = getpathcomponents(tid, d)

        sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))

        if os.path.exists(sstatefile):
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
            found.add(tid)
            foundLocal.add(tid)
            continue
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    # Second pass: probe configured mirrors for the objects still missing.
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init(thread_worker):
            thread_worker.connection_cache = FetchConnectionCache()

        def checkstatus_end(thread_worker):
            thread_worker.connection_cache.close_connections()

        def checkstatus(thread_worker, arg):
            # Runs on a pool worker thread: probe one sstate object URI.
            (tid, sstatefile) = arg

            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            # Set SRC_URI on the per-thread copy which the fetcher below
            # actually uses; mutating the shared localdata here would race
            # with the other worker threads.
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                        connection_cache=thread_worker.connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                found.add(tid)
                foundNet.add(tid)
                if tid in missed:
                    missed.remove(tid)
            except:
                # NOTE(review): deliberately broad — a failed probe must not
                # kill the worker thread; the object is simply counted missed.
                missed.add(tid)
                bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
            if len(tasklist) >= min_tasks:
                bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)

        tasklist = []
        # Only show a progress bar when enough objects need checking
        min_tasks = 100
        for tid in sq_data['hash']:
            if tid in found:
                continue
            spec, extrapath, tname = getpathcomponents(tid, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            if len(tasklist) >= min_tasks:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            import multiprocessing
            nproc = min(multiprocessing.cpu_count(), len(tasklist))

            bb.event.enable_threadlock()
            pool = oe.utils.ThreadedPool(nproc, len(tasklist),
                    worker_init=checkstatus_init, worker_end=checkstatus_end)
            for t in tasklist:
                pool.add_task(checkstatus, t)
            pool.start()
            pool.wait_completion()
            bb.event.disable_threadlock()

            if len(tasklist) >= min_tasks:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    # Fire a metadata event with the results when toaster is active.
    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []}
        for tid in missed:
            spec, extrapath, tname = getpathcomponents(tid, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile))
        for tid in found:
            spec, extrapath, tname = getpathcomponents(tid, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile))
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Network %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(foundLocal), len(foundNet), len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
1013
1014BB_SETSCENE_DEPVALID = "setscene_depvalid"
1015
def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    """Decide whether a setscene task's dependency really needs installing.

    taskdependees is a dict of tasks which depend on task, each being a 3 item
    list of [PN, TASKNAME, FILENAME]; task is included in taskdependees too.

    Return - False - We need this dependency
           - True - We can skip this dependency
    """
    import re

    # Append to the caller-supplied log list, or fall back to bb.debug
    def logit(msg, log):
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    # Name-based heuristic: is this PN a native/cross/crosssdk recipe?
    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # We only need to trigger populate_lic through direct dependencies
    if taskdependees[task][1] == "do_populate_lic":
        return True

    # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
    if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    # Walk every dependee; any one that needs us forces installation (False).
    for dep in taskdependees:
        logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package doesn't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # This is due to the [depends] in useradd.bbclass complicating matters
        # The logic *is* reversed here due to the way hard setscene dependencies are injected
        if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        # do_shared_workdir tasks don't require any of their dependees installed
        if taskdependees[task][1] == 'do_shared_workdir':
            continue

        # No task needs a do_populate_lic dependency installed
        if taskdependees[dep][1] == "do_populate_lic":
            continue


        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True
1122
addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    # After a task succeeds without writing an sstate package
    # (SSTATE_CURRTASK unset), still write or refresh the task's siginfo
    # file in the sstate directory.
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        taskname = d.getVar("BB_RUNTASK")[3:]  # strip the "do_" prefix
        spec = d.getVar('SSTATE_PKGSPEC')
        swspec = d.getVar('SSTATE_SWSPEC')
        # Machine-independent tasks use the software-only spec and no extra path
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            # Refresh timestamps so cache cleanup keeps the file; a
            # read-only sstate location raises PermissionError, ignore it.
            try:
                os.utime(siginfo, None)
            except PermissionError:
                pass

}
1146
1147SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1148
# Event handler which removes manifests and stamp files for
# recipes which are no longer reachable in a build where they
# once were.
# Also optionally removes the workdir of those tasks/recipes.
#
addhandler sstate_eventhandler2
sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler2() {
    # Prune manifests/stamps (and optionally workdirs) for recipes this
    # machine once built in this build directory but which are no longer
    # reachable, based on the ReachableStamps event data.
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    # A preserve-stamps file lists stamps that must never be pruned
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    # Process the per-architecture manifest indexes
    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                try:
                    # Index lines are "<stamp> <manifest> <workdir>"
                    (stamp, manifest, workdir) = l.split()
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                # Clean every manifest variant except postrm scripts
                for m in glob.glob(manifest + ".*"):
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        # Rewrite the index without the removed entries
        with open(i, "w") as f:
            for l in lines:
                f.write(l)
    # Record the stamps from this run in the machine index
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}