author    | Richard Purdie <richard.purdie@linuxfoundation.org> | 2022-11-21 13:10:19 +0000
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2022-11-22 15:28:27 +0000
commit    | 5fdd28e37face59410da781d5d70178565e69e5e
tree      | ee6cb4abc5ffa8324d4b918f4400a2680558fdf1 /bitbake
parent    | 09da786273e56225aaf35ec33975e6674c7dec70
download  | poky-5fdd28e37face59410da781d5d70178565e69e5e.tar.gz
bitbake: runqueue: Fix race issues around hash equivalence and sstate reuse
We identified a use case where a native recipe (autoconf-native) was
rebuilt with no change in output, yet the sstate for the do_package tasks
wasn't being reused.
The issue is that do_package tasks have a hard dependency on
pseudo-native:do_populate_sysroot. That task was one of the many
tasks being rehashed when autoconf-native's hash was changed.
If the update_tasks processing handled a recipe before it had handled
pseudo-native, that recipe would be marked as not restorable from sstate
and its tasks would run in full.
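
To make the ordering problem concrete, here is a small illustrative Python
snippet. It is not the actual runqueue.py code: the task names, the plain
sets and the single-pass loop are simplified stand-ins for the real
runqueue state.

    # Single-pass processing: the harddep check for each tid runs against
    # scenequeue_notcovered before later tids in the same batch (here
    # pseudo-native) have been reprocessed, so the outcome depends on
    # iteration order.
    scenequeue_notcovered = {"pseudo-native:do_populate_sysroot"}   # stale state
    sq_harddeps = {"pseudo-native:do_populate_sysroot": {"foo:do_package"}}

    for tid in ["foo:do_package", "pseudo-native:do_populate_sysroot"]:
        scenequeue_notcovered.discard(tid)   # stand-in for the per-tid state reset
        harddepfail = any(tid in sq_harddeps[t] and t in scenequeue_notcovered
                          for t in sq_harddeps)
        print(tid, "harddepfail =", harddepfail)

    # foo:do_package reports harddepfail = True and loses its sstate reuse;
    # process the list in the opposite order and it reports False.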
The fix is to split the processing into two passes: the first handles the
existing covered/notcovered updates, and the second then checks whether
there are "harddep" issues.
This defers the do_package tasks until after pseudo-native has been
installed from sstate, as expected, and everything works correctly again.
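
A rough, self-contained sketch of the two-pass structure this change
introduces, simplified from the diff below; the plain sets and dicts are
hypothetical stand-ins for the RunQueueExecute/SQData state.

    # Pass 1: only collect the affected tids and refresh the covered/notcovered
    # bookkeeping, without making any dependency decisions yet.
    pending_migrations = ["foo:do_package", "pseudo-native:do_populate_sysroot"]
    sq_harddeps = {"pseudo-native:do_populate_sysroot": {"foo:do_package"}}
    sq_revdeps = {tid: set() for tid in pending_migrations}
    scenequeue_covered = set()
    scenequeue_notcovered = {"pseudo-native:do_populate_sysroot"}
    sq_buildable, valid = set(), set(pending_migrations)

    update_tasks = []
    for tid in pending_migrations:
        scenequeue_covered.discard(tid)        # stand-in for the real state updates
        scenequeue_notcovered.discard(tid)
        update_tasks.append(tid)

    # Pass 2: with the whole batch now consistent, perform the harddep and
    # reverse-dependency checks, mirroring the logic added in the diff below.
    update_tasks2 = []
    for tid in update_tasks:
        harddepfail = any(tid in sq_harddeps[t] and t in scenequeue_notcovered
                          for t in sq_harddeps)
        if not harddepfail and sq_revdeps[tid].issubset(scenequeue_covered | scenequeue_notcovered):
            sq_buildable.add(tid)
        update_tasks2.append((tid, harddepfail, tid in valid))

    # foo:do_package no longer sees a spurious harddep failure, regardless of
    # the order in which the batch is processed.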
(Bitbake rev: e479d1e418a7d34f0a4663b4a0e22bb11503c8ab)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'bitbake')
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index bcaaf70abd..2c1714da9d 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -2508,17 +2508,6 @@ class RunQueueExecute:
                 self.sq_buildable.remove(tid)
             if tid in self.sq_running:
                 self.sq_running.remove(tid)
-            harddepfail = False
-            for t in self.sqdata.sq_harddeps:
-                if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
-                    harddepfail = True
-                    break
-            if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
-                if tid not in self.sq_buildable:
-                    self.sq_buildable.add(tid)
-            if not self.sqdata.sq_revdeps[tid]:
-                self.sq_buildable.add(tid)
-
             if tid in self.sqdata.outrightfail:
                 self.sqdata.outrightfail.remove(tid)
             if tid in self.scenequeue_notcovered:
@@ -2537,21 +2526,36 @@ class RunQueueExecute:
             if tid in self.build_stamps:
                 del self.build_stamps[tid]
 
-            update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))
+            update_tasks.append(tid)
+
+        update_tasks2 = []
+        for tid in update_tasks:
+            harddepfail = False
+            for t in self.sqdata.sq_harddeps:
+                if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
+                    harddepfail = True
+                    break
+            if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
+                if tid not in self.sq_buildable:
+                    self.sq_buildable.add(tid)
+            if not self.sqdata.sq_revdeps[tid]:
+                self.sq_buildable.add(tid)
+
+            update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid))
 
-        if update_tasks:
+        if update_tasks2:
             self.sqdone = False
             for mc in sorted(self.sqdata.multiconfigs):
-                for tid in sorted([t[0] for t in update_tasks]):
+                for tid in sorted([t[0] for t in update_tasks2]):
                     if mc_from_tid(tid) != mc:
                         continue
                     h = pending_hash_index(tid, self.rqdata)
                     if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
                         self.sq_deferred[tid] = self.sqdata.hashes[h]
                         bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
-            update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
+            update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
 
-        for (tid, harddepfail, origvalid) in update_tasks:
+        for (tid, harddepfail, origvalid) in update_tasks2:
             if tid in self.sqdata.valid and not origvalid:
                 hashequiv_logger.verbose("Setscene task %s became valid" % tid)
             if harddepfail: