Diffstat (limited to 'bitbake/lib/bb/runqueue.py'):
 bitbake/lib/bb/runqueue.py (-rw-r--r--) | 319
 1 file changed, 195 insertions(+), 124 deletions(-)
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index bc7e18175d..63d4edd892 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -10,10 +10,12 @@ Handles preparation and execution of a queue of tasks
 #
 
 import copy
+import enum
 import os
 import sys
 import stat
 import errno
+import itertools
 import logging
 import re
 import bb
@@ -124,14 +126,16 @@ class RunQueueStats:
     def updateActiveSetscene(self, active):
         self.setscene_active = active
 
-# These values indicate the next step due to be run in the
-# runQueue state machine
-runQueuePrepare = 2
-runQueueSceneInit = 3
-runQueueRunning = 6
-runQueueFailed = 7
-runQueueCleanUp = 8
-runQueueComplete = 9
+
+# Indicates the next step due to run in the runQueue state machine
+class RunQueueState(enum.Enum):
+    PREPARE = 0
+    SCENE_INIT = 1
+    DUMP_SIGS = 2
+    RUNNING = 3
+    FAILED = 4
+    CLEAN_UP = 5
+    COMPLETE = 6
 
 class RunQueueScheduler(object):
     """
@@ -475,7 +479,6 @@ class RunQueueData:
         self.runtaskentries = {}
 
     def runq_depends_names(self, ids):
-        import re
         ret = []
         for id in ids:
             nam = os.path.basename(id)
@@ -677,7 +680,7 @@ class RunQueueData:
 
         self.init_progress_reporter.start()
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Step A - Work out a list of tasks to run
         #
@@ -728,6 +731,8 @@ class RunQueueData:
                         if mc == frommc:
                             fn = taskData[mcdep].build_targets[pn][0]
                             newdep = '%s:%s' % (fn,deptask)
+                            if newdep not in taskData[mcdep].taskentries:
+                                bb.fatal("Task mcdepends on non-existent task %s" % (newdep))
                             taskData[mc].taskentries[tid].tdepends.append(newdep)
 
         for mc in taskData:
@@ -826,7 +831,7 @@ class RunQueueData:
         #self.dump_data()
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Resolve recursive 'recrdeptask' dependencies (Part B)
         #
@@ -923,7 +928,7 @@ class RunQueueData:
                 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         #self.dump_data()
 
@@ -1005,7 +1010,7 @@ class RunQueueData:
             mark_active(tid, 1)
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Step C - Prune all inactive tasks
         #
@@ -1052,7 +1057,7 @@ class RunQueueData:
                 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Handle runonly
         if self.cooker.configuration.runonly:
@@ -1093,7 +1098,7 @@ class RunQueueData:
         logger.verbose("Assign Weightings")
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Generate a list of reverse dependencies to ease future calculations
         for tid in self.runtaskentries:
@@ -1101,7 +1106,7 @@ class RunQueueData:
                 self.runtaskentries[dep].revdeps.add(tid)
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Identify tasks at the end of dependency chains
         # Error on circular dependency loops (length two)
@@ -1118,14 +1123,14 @@ class RunQueueData:
         logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Calculate task weights
         # Check of higher length circular dependencies
         self.runq_weight = self.calculate_task_weights(endpoints)
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Sanity Check - Check for multiple tasks building the same provider
         for mc in self.dataCaches:
@@ -1226,7 +1231,7 @@ class RunQueueData:
 
         self.init_progress_reporter.next_stage()
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Iterate over the task list looking for tasks with a 'setscene' function
         self.runq_setscene_tids = set()
@@ -1239,7 +1244,7 @@ class RunQueueData:
                 self.runq_setscene_tids.add(tid)
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Invalidate task if force mode active
         if self.cooker.configuration.force:
@@ -1256,7 +1261,7 @@ class RunQueueData:
                 invalidate_task(fn + ":" + st, True)
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         # Create and print to the logs a virtual/xxxx -> PN (fn) table
         for mc in taskData:
@@ -1269,31 +1274,45 @@ class RunQueueData:
             bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
 
         self.init_progress_reporter.next_stage()
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
         bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
 
+        starttime = time.time()
+        lasttime = starttime
+
         # Iterate over the task list and call into the siggen code
         dealtwith = set()
         todeal = set(self.runtaskentries)
         while todeal:
+            ready = set()
             for tid in todeal.copy():
                 if not (self.runtaskentries[tid].depends - dealtwith):
-                    dealtwith.add(tid)
-                    todeal.remove(tid)
-                    self.prepare_task_hash(tid)
-                    bb.event.check_for_interrupts(self.cooker.data)
+                    self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
+                    # get_taskhash for a given tid *must* be called before get_unihash* below
+                    self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
+                    ready.add(tid)
+            unihashes = bb.parse.siggen.get_unihashes(ready)
+            for tid in ready:
+                dealtwith.add(tid)
+                todeal.remove(tid)
+                self.runtaskentries[tid].unihash = unihashes[tid]
+
+            bb.event.check_for_interrupts()
+
+            if time.time() > (lasttime + 30):
+                lasttime = time.time()
+                hashequiv_logger.verbose("Initial setup loop progress: %s of %s in %s" % (len(todeal), len(self.runtaskentries), lasttime - starttime))
+
+        endtime = time.time()
+        if (endtime-starttime > 60):
+            hashequiv_logger.verbose("Initial setup loop took: %s" % (endtime-starttime))
 
         bb.parse.siggen.writeout_file_checksum_cache()
 
         #self.dump_data()
         return len(self.runtaskentries)
 
-    def prepare_task_hash(self, tid):
-        bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
-        self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
-        self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
-
     def dump_data(self):
         """
         Dump some debug information on the internal data structures
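The reworked loop above hashes tasks in dependency "waves" and batches the unihash lookups per wave through get_unihashes() instead of querying once per task. A self-contained sketch of that shape, with stand-in hash functions in place of bb.parse.siggen and a made-up dependency graph:

import hashlib

# Hypothetical task graph: task -> set of dependencies
depends = {
    "a": set(),
    "b": {"a"},
    "c": {"a"},
    "d": {"b", "c"},
}

def get_taskhash(tid):
    # Stand-in for bb.parse.siggen.get_taskhash()
    return hashlib.sha256(tid.encode()).hexdigest()

def get_unihashes(tids):
    # Stand-in for bb.parse.siggen.get_unihashes(): one batched query per wave
    return {tid: get_taskhash(tid)[:16] for tid in tids}

dealtwith = set()
todeal = set(depends)
unihash = {}
while todeal:
    # Everything whose dependencies are already handled is ready in this wave
    ready = {tid for tid in todeal if not (depends[tid] - dealtwith)}
    hashes = get_unihashes(ready)       # single round trip instead of one per task
    for tid in ready:
        unihash[tid] = hashes[tid]
        dealtwith.add(tid)
        todeal.remove(tid)

print(unihash)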
@@ -1320,13 +1339,13 @@ class RunQueue:
         self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
         self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
 
-        self.state = runQueuePrepare
+        self.state = RunQueueState.PREPARE
 
         # For disk space monitor
         # Invoked at regular time intervals via the bitbake heartbeat event
         # while the build is running. We generate a unique name for the handler
         # here, just in case that there ever is more than one RunQueue instance,
-        # start the handler when reaching runQueueSceneInit, and stop it when
+        # start the handler when reaching RunQueueState.SCENE_INIT, and stop it when
         # done with the build.
         self.dm = monitordisk.diskMonitor(cfgData)
         self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
@@ -1538,9 +1557,9 @@ class RunQueue:
         """
 
         retval = True
-        bb.event.check_for_interrupts(self.cooker.data)
+        bb.event.check_for_interrupts()
 
-        if self.state is runQueuePrepare:
+        if self.state == RunQueueState.PREPARE:
             # NOTE: if you add, remove or significantly refactor the stages of this
             # process then you should recalculate the weightings here. This is quite
             # easy to do - just change the next line temporarily to pass debug=True as
@@ -1551,12 +1570,12 @@ class RunQueue:
                 "Initialising tasks",
                 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
             if self.rqdata.prepare() == 0:
-                self.state = runQueueComplete
+                self.state = RunQueueState.COMPLETE
             else:
-                self.state = runQueueSceneInit
+                self.state = RunQueueState.SCENE_INIT
                 bb.parse.siggen.save_unitaskhashes()
 
-        if self.state is runQueueSceneInit:
+        if self.state == RunQueueState.SCENE_INIT:
             self.rqdata.init_progress_reporter.next_stage()
 
             # we are ready to run, emit dependency info to any UI or class which
@@ -1567,24 +1586,29 @@ class RunQueue:
 
             if not self.dm_event_handler_registered:
                 res = bb.event.register(self.dm_event_handler_name,
-                                        lambda x, y: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
+                                        lambda x, y: self.dm.check(self) if self.state in [RunQueueState.RUNNING, RunQueueState.CLEAN_UP] else False,
                                         ('bb.event.HeartbeatEvent',), data=self.cfgData)
                 self.dm_event_handler_registered = True
 
             self.rqdata.init_progress_reporter.next_stage()
             self.rqexe = RunQueueExecute(self)
 
-            dump = self.cooker.configuration.dump_signatures
-            if dump:
+            dumpsigs = self.cooker.configuration.dump_signatures
+            if dumpsigs:
                 self.rqdata.init_progress_reporter.finish()
-                if 'printdiff' in dump:
-                    invalidtasks = self.print_diffscenetasks()
-                self.dump_signatures(dump)
-                if 'printdiff' in dump:
-                    self.write_diffscenetasks(invalidtasks)
-                self.state = runQueueComplete
-
-        if self.state is runQueueSceneInit:
+                if 'printdiff' in dumpsigs:
+                    self.invalidtasks_dump = self.print_diffscenetasks()
+                self.state = RunQueueState.DUMP_SIGS
+
+        if self.state == RunQueueState.DUMP_SIGS:
+            dumpsigs = self.cooker.configuration.dump_signatures
+            retval = self.dump_signatures(dumpsigs)
+            if retval is False:
+                if 'printdiff' in dumpsigs:
+                    self.write_diffscenetasks(self.invalidtasks_dump)
+                self.state = RunQueueState.COMPLETE
+
+        if self.state == RunQueueState.SCENE_INIT:
             self.start_worker(self.rqexe)
             self.rqdata.init_progress_reporter.finish()
 
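The new DUMP_SIGS state leans on the way this method is driven: as this patch reads, _execute_runqueue() is re-entered by the cooker's idle loop and a non-False return value means "not finished yet, call me again". A toy illustration of that calling convention; the driver loop, timings and dictionary-based context below are invented for the example, not BitBake code:

import time

def step(ctx):
    # One call of a re-entrant state-machine step.  Returning a number means
    # "busy, call me again"; returning False means "finished".
    if "started" not in ctx:
        ctx["started"] = time.time()
        return 0.1
    if time.time() - ctx["started"] < 0.3:
        return 0.1
    return False

ctx = {}
while True:
    retval = step(ctx)
    if retval is False:
        break
    time.sleep(retval)
print("done")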
@@ -1597,15 +1621,15 @@ class RunQueue:
                 self.rqexe.tasks_notcovered.add(tid)
             self.rqexe.sqdone = True
             logger.info('Executing Tasks')
-            self.state = runQueueRunning
+            self.state = RunQueueState.RUNNING
 
-        if self.state is runQueueRunning:
+        if self.state == RunQueueState.RUNNING:
             retval = self.rqexe.execute()
 
-        if self.state is runQueueCleanUp:
+        if self.state == RunQueueState.CLEAN_UP:
             retval = self.rqexe.finish()
 
-        build_done = self.state is runQueueComplete or self.state is runQueueFailed
+        build_done = self.state in [RunQueueState.COMPLETE, RunQueueState.FAILED]
 
         if build_done and self.dm_event_handler_registered:
             bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData)
@@ -1621,10 +1645,10 @@ class RunQueue:
             # Let's avoid the word "failed" if nothing actually did
             logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
 
-        if self.state is runQueueFailed:
+        if self.state == RunQueueState.FAILED:
             raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
 
-        if self.state is runQueueComplete:
+        if self.state == RunQueueState.COMPLETE:
             # All done
             return False
 
@@ -1644,7 +1668,7 @@ class RunQueue:
                 self.teardown_workers()
             except:
                 pass
-            self.state = runQueueComplete
+            self.state = RunQueueState.COMPLETE
             raise
         except Exception as err:
             logger.exception("An uncaught exception occurred in runqueue")
@@ -1652,12 +1676,12 @@ class RunQueue:
                 self.teardown_workers()
             except:
                 pass
-            self.state = runQueueComplete
+            self.state = RunQueueState.COMPLETE
             raise
 
     def finish_runqueue(self, now = False):
         if not self.rqexe:
-            self.state = runQueueComplete
+            self.state = RunQueueState.COMPLETE
             return
 
         if now:
@@ -1672,33 +1696,42 @@ class RunQueue:
                 bb.parse.siggen.dump_sigtask(taskfn, taskname, dataCaches[mc].stamp[taskfn], True)
 
     def dump_signatures(self, options):
-        if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset:
-            bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled")
-
-        bb.note("Writing task signature files")
-
-        max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
-        def chunkify(l, n):
-            return [l[i::n] for i in range(n)]
-        tids = chunkify(list(self.rqdata.runtaskentries), max_process)
-        # We cannot use the real multiprocessing.Pool easily due to some local data
-        # that can't be pickled. This is a cheap multi-process solution.
-        launched = []
-        while tids:
-            if len(launched) < max_process:
-                p = Process(target=self._rq_dump_sigtid, args=(tids.pop(), ))
+        if not hasattr(self, "dumpsigs_launched"):
+            if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset:
+                bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled")
+
+            bb.note("Writing task signature files")
+
+            max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
+            def chunkify(l, n):
+                return [l[i::n] for i in range(n)]
+            dumpsigs_tids = chunkify(list(self.rqdata.runtaskentries), max_process)
+
+            # We cannot use the real multiprocessing.Pool easily due to some local data
+            # that can't be pickled. This is a cheap multi-process solution.
+            self.dumpsigs_launched = []
+
+            for tids in dumpsigs_tids:
+                p = Process(target=self._rq_dump_sigtid, args=(tids, ))
                 p.start()
-                launched.append(p)
-            for q in launched:
-                # The finished processes are joined when calling is_alive()
-                if not q.is_alive():
-                    launched.remove(q)
-        for p in launched:
+                self.dumpsigs_launched.append(p)
+
+            return 1.0
+
+        for q in self.dumpsigs_launched:
+            # The finished processes are joined when calling is_alive()
+            if not q.is_alive():
+                self.dumpsigs_launched.remove(q)
+
+        if self.dumpsigs_launched:
+            return 1.0
+
+        for p in self.dumpsigs_launched:
             p.join()
 
         bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
 
-        return
+        return False
 
     def print_diffscenetasks(self):
         def get_root_invalid_tasks(task, taskdepends, valid, noexec, visited_invalid):
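dump_signatures() now starts its worker processes on the first call and is then polled until they have all exited, rather than blocking the caller. A minimal, self-contained sketch of that launch-once/poll-later shape using the same round-robin chunkify() split; the Dumper class and work() function are invented stand-ins for the RunQueue and _rq_dump_sigtid():

from multiprocessing import Process
import os
import time

def chunkify(l, n):
    # Same round-robin split as the patch: item i goes to chunk i % n
    return [l[i::n] for i in range(n)]

def work(items):
    # Stand-in for the real per-chunk signature dump
    time.sleep(0.1)

class Dumper:
    def __init__(self, items):
        self.items = items

    def dump(self):
        if not hasattr(self, "launched"):
            # First call: start one worker per chunk and hand control back
            self.launched = []
            for chunk in chunkify(self.items, os.cpu_count() or 1):
                p = Process(target=work, args=(chunk,))
                p.start()
                self.launched.append(p)
            return 1.0                      # "still running, poll me again"
        for p in self.launched[:]:          # iterate over a copy while removing
            if not p.is_alive():
                p.join()
                self.launched.remove(p)
        if self.launched:
            return 1.0
        return False                        # everything has finished

if __name__ == "__main__":
    d = Dumper(list(range(100)))
    while d.dump() is not False:
        time.sleep(0.1)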
@@ -1971,14 +2004,14 @@ class RunQueueExecute:
             pass
 
         if self.failed_tids:
-            self.rq.state = runQueueFailed
+            self.rq.state = RunQueueState.FAILED
             return
 
-        self.rq.state = runQueueComplete
+        self.rq.state = RunQueueState.COMPLETE
         return
 
     def finish(self):
-        self.rq.state = runQueueCleanUp
+        self.rq.state = RunQueueState.CLEAN_UP
 
         active = self.stats.active + len(self.sq_live)
         if active > 0:
@@ -1987,10 +2020,10 @@ class RunQueueExecute:
             return self.rq.active_fds()
 
         if self.failed_tids:
-            self.rq.state = runQueueFailed
+            self.rq.state = RunQueueState.FAILED
             return True
 
-        self.rq.state = runQueueComplete
+        self.rq.state = RunQueueState.COMPLETE
         return True
 
     # Used by setscene only
@@ -2109,7 +2142,7 @@ class RunQueueExecute:
         bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=("".join(fakeroot_log) or None)), self.cfgData)
 
         if self.rqdata.taskData[''].halt:
-            self.rq.state = runQueueCleanUp
+            self.rq.state = RunQueueState.CLEAN_UP
 
     def task_skip(self, task, reason):
         self.runq_running.add(task)
@@ -2175,12 +2208,20 @@ class RunQueueExecute:
         if not hasattr(self, "sorted_setscene_tids"):
             # Don't want to sort this set every execution
             self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
+            # Resume looping where we left off when we returned to feed the mainloop
+            self.setscene_tids_generator = itertools.cycle(self.rqdata.runq_setscene_tids)
 
         task = None
         if not self.sqdone and self.can_start_task():
-            # Find the next setscene to run
-            for nexttask in self.sorted_setscene_tids:
+            loopcount = 0
+            # Find the next setscene to run, exit the loop when we've processed all tids or found something to execute
+            while loopcount < len(self.rqdata.runq_setscene_tids):
+                loopcount += 1
+                nexttask = next(self.setscene_tids_generator)
                 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred:
+                    if nexttask in self.sq_deferred and self.sq_deferred[nexttask] not in self.runq_complete:
+                        # Skip deferred tasks quickly before the 'expensive' tests below - this is key to performant multiconfig builds
+                        continue
                     if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \
                             nexttask not in self.sq_needed_harddeps and \
                             self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \
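The scheduler now keeps a persistent itertools.cycle over the setscene tids, so each call resumes scanning where the previous call stopped and gives up after one full pass. A small standalone sketch of that idiom (the Picker class and its data are made up):

import itertools

class Picker:
    def __init__(self, candidates):
        self.candidates = candidates
        # Persistent cycle: iteration resumes across calls to pick()
        self.cycle = itertools.cycle(candidates)

    def pick(self, runnable):
        # Bound the scan to one full pass over the candidate list
        for _ in range(len(self.candidates)):
            nxt = next(self.cycle)
            if nxt in runnable:
                return nxt
        return None

p = Picker(["a", "b", "c", "d"])
print(p.pick({"c", "d"}))   # "c": scans a, b, then finds c
print(p.pick({"c", "d"}))   # "d": resumes after c instead of rescanning from a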
@@ -2210,8 +2251,7 @@ class RunQueueExecute:
                             if t in self.runq_running and t not in self.runq_complete:
                                 continue
                     if nexttask in self.sq_deferred:
-                        if self.sq_deferred[nexttask] not in self.runq_complete:
-                            continue
+                        # Deferred tasks that were still deferred were skipped above so we now need to process
                         logger.debug("Task %s no longer deferred" % nexttask)
                         del self.sq_deferred[nexttask]
                     valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
@@ -2296,17 +2336,17 @@ class RunQueueExecute:
 
             err = self.summarise_scenequeue_errors()
             if err:
-                self.rq.state = runQueueFailed
+                self.rq.state = RunQueueState.FAILED
                 return True
 
             if self.cooker.configuration.setsceneonly:
-                self.rq.state = runQueueComplete
+                self.rq.state = RunQueueState.COMPLETE
                 return True
             self.sqdone = True
 
             if self.stats.total == 0:
                 # nothing to do
-                self.rq.state = runQueueComplete
+                self.rq.state = RunQueueState.COMPLETE
                 return True
 
         if self.cooker.configuration.setsceneonly:
@@ -2373,7 +2413,7 @@ class RunQueueExecute:
                         self.rq.start_fakeworker(self, mc)
                     except OSError as exc:
                         logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
-                        self.rq.state = runQueueFailed
+                        self.rq.state = RunQueueState.FAILED
                         self.stats.taskFailed()
                         return True
             RunQueue.send_pickled_data(self.rq.fakeworker[mc].process, runtask, "runtask")
@@ -2401,7 +2441,7 @@ class RunQueueExecute:
             return True
 
         if self.failed_tids:
-            self.rq.state = runQueueFailed
+            self.rq.state = RunQueueState.FAILED
             return True
 
         # Sanity Checks
@@ -2418,9 +2458,9 @@ class RunQueueExecute:
                 err = True
 
         if err:
-            self.rq.state = runQueueFailed
+            self.rq.state = RunQueueState.FAILED
         else:
-            self.rq.state = runQueueComplete
+            self.rq.state = RunQueueState.COMPLETE
 
         return True
 
@@ -2438,14 +2478,17 @@ class RunQueueExecute:
         taskdepdata_cache = {}
         for task in self.rqdata.runtaskentries:
             (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
-            deps = self.rqdata.runtaskentries[task].depends
-            provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
-            taskhash = self.rqdata.runtaskentries[task].hash
-            unihash = self.rqdata.runtaskentries[task].unihash
-            deps = self.filtermcdeps(task, mc, deps)
-            hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn]
-            taskdepdata_cache[task] = [pn, taskname, fn, deps, provides, taskhash, unihash, hashfn]
+            taskdepdata_cache[task] = bb.TaskData(
+                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn],
+                taskname = taskname,
+                fn = fn,
+                deps = self.filtermcdeps(task, mc, self.rqdata.runtaskentries[task].depends),
+                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn],
+                taskhash = self.rqdata.runtaskentries[task].hash,
+                unihash = self.rqdata.runtaskentries[task].unihash,
+                hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn],
+                taskhash_deps = self.rqdata.runtaskentries[task].taskhash_deps,
+            )
 
         self.taskdepdata_cache = taskdepdata_cache
 
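taskdepdata entries are now bb.TaskData records instead of positional lists. bb.TaskData itself is defined elsewhere in BitBake; the stand-in namedtuple below only illustrates why the later cache update has to switch from index assignment to _replace(): named-tuple records are immutable and addressed by field name.

from collections import namedtuple

# Stand-in with the same field names the patch uses; not the real bb.TaskData
TaskData = namedtuple("TaskData",
    ["pn", "taskname", "fn", "deps", "provides", "taskhash", "unihash", "hashfn", "taskhash_deps"])

entry = TaskData(pn="example", taskname="do_compile", fn="example.bb",
                 deps=set(), provides=["example"], taskhash="abc",
                 unihash="abc", hashfn="example", taskhash_deps=set())

# entry[6] = "def" would raise TypeError; instead a new record is produced:
entry = entry._replace(unihash="def")
print(entry.unihash)   # "def"
print(entry.deps)      # fields are accessed by name rather than a magic index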
@@ -2460,9 +2503,11 @@ class RunQueueExecute:
         while next:
             additional = []
             for revdep in next:
-                self.taskdepdata_cache[revdep][6] = self.rqdata.runtaskentries[revdep].unihash
+                self.taskdepdata_cache[revdep] = self.taskdepdata_cache[revdep]._replace(
+                    unihash=self.rqdata.runtaskentries[revdep].unihash
+                )
                 taskdepdata[revdep] = self.taskdepdata_cache[revdep]
-                for revdep2 in self.taskdepdata_cache[revdep][3]:
+                for revdep2 in self.taskdepdata_cache[revdep].deps:
                     if revdep2 not in taskdepdata:
                         additional.append(revdep2)
             next = additional
@@ -2531,9 +2576,6 @@ class RunQueueExecute:
             self.rqdata.runtaskentries[hashtid].unihash = unihash
             bb.parse.siggen.set_unihash(hashtid, unihash)
             toprocess.add(hashtid)
-        if torehash:
-            # Need to save after set_unihash above
-            bb.parse.siggen.save_unitaskhashes()
 
         # Work out all tasks which depend upon these
         total = set()
@@ -2556,17 +2598,28 @@ class RunQueueExecute:
             elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
                 next.add(p)
 
+        starttime = time.time()
+        lasttime = starttime
+
         # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
         while next:
             current = next.copy()
             next = set()
+            ready = {}
             for tid in current:
                 if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
                     continue
+                # get_taskhash for a given tid *must* be called before get_unihash* below
+                ready[tid] = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)
+
+            unihashes = bb.parse.siggen.get_unihashes(ready.keys())
+
+            for tid in ready:
                 orighash = self.rqdata.runtaskentries[tid].hash
-                newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)
+                newhash = ready[tid]
                 origuni = self.rqdata.runtaskentries[tid].unihash
-                newuni = bb.parse.siggen.get_unihash(tid)
+                newuni = unihashes[tid]
+
                 # FIXME, need to check it can come from sstate at all for determinism?
                 remapped = False
                 if newuni == origuni:
@@ -2587,6 +2640,15 @@ class RunQueueExecute:
                 next |= self.rqdata.runtaskentries[tid].revdeps
                 total.remove(tid)
                 next.intersection_update(total)
+            bb.event.check_for_interrupts()
+
+            if time.time() > (lasttime + 30):
+                lasttime = time.time()
+                hashequiv_logger.verbose("Rehash loop slow progress: %s in %s" % (len(total), lasttime - starttime))
+
+        endtime = time.time()
+        if (endtime-starttime > 60):
+            hashequiv_logger.verbose("Rehash loop took more than 60s: %s" % (endtime-starttime))
 
         if changed:
             for mc in self.rq.worker:
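The rehash loop gains the same throttled progress reporting as the setup loop: a log line at most every 30 seconds while working, plus a summary only when the whole pass exceeds a minute. A generic sketch of that pattern with placeholder work:

import time

def process(items, report_every=30, summarise_over=60):
    starttime = time.time()
    lasttime = starttime
    remaining = list(items)
    while remaining:
        remaining.pop()                      # placeholder for the real work
        # Throttle progress output to at most one line every report_every seconds
        if time.time() > (lasttime + report_every):
            lasttime = time.time()
            print("progress: %d left after %.1fs" % (len(remaining), lasttime - starttime))
    endtime = time.time()
    if endtime - starttime > summarise_over:
        print("loop took: %.1fs" % (endtime - starttime))

process(range(100000))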
@@ -2628,7 +2690,7 @@ class RunQueueExecute:
                 if dep in self.runq_complete and dep not in self.runq_tasksrun:
                     bb.error("Task %s marked as completed but now needing to rerun? Halting build." % dep)
                     self.failed_tids.append(tid)
-                    self.rq.state = runQueueCleanUp
+                    self.rq.state = RunQueueState.CLEAN_UP
                     return
 
                 if dep not in self.runq_complete:
@@ -2712,8 +2774,12 @@ class RunQueueExecute:
                 logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                 self.sq_task_failoutright(dep)
                 continue
+
+        # For performance, only compute allcovered once if needed
+        if self.sqdata.sq_deps[task]:
+            allcovered = self.scenequeue_covered | self.scenequeue_notcovered
         for dep in sorted(self.sqdata.sq_deps[task]):
-            if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
+            if self.sqdata.sq_revdeps[dep].issubset(allcovered):
                 if dep not in self.sq_buildable:
                     self.sq_buildable.add(dep)
 
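Hoisting the covered/notcovered union out of the per-dependent loop avoids rebuilding the same large set for every dependent. A rough, self-contained timing comparison of the two shapes (set sizes and repetition counts are arbitrary):

import timeit

covered = set(range(10000))
notcovered = set(range(10000, 12000))
deps = [set(range(10)) for _ in range(200)]

def per_dep():
    # Rebuilds the union for every dependent
    for d in deps:
        d.issubset(covered | notcovered)

def hoisted():
    # Builds the union once, as the patch now does
    allcovered = covered | notcovered
    for d in deps:
        d.issubset(allcovered)

print("per-dependent union:", timeit.timeit(per_dep, number=10))
print("hoisted union:      ", timeit.timeit(hoisted, number=10))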
@@ -2759,7 +2825,7 @@ class RunQueueExecute:
         pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
         if not check_setscene_enforce_ignore_tasks(pn, taskname, self.rqdata.setscene_ignore_tasks):
             logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
-            self.rq.state = runQueueCleanUp
+            self.rq.state = RunQueueState.CLEAN_UP
 
     def sq_task_complete(self, task):
         bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
@@ -2806,13 +2872,19 @@ class RunQueueExecute:
             additional = []
             for revdep in next:
                 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
-                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                 deps = getsetscenedeps(revdep)
-                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
-                taskhash = self.rqdata.runtaskentries[revdep].hash
-                unihash = self.rqdata.runtaskentries[revdep].unihash
-                hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn]
-                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash, hashfn]
+
+                taskdepdata[revdep] = bb.TaskData(
+                    pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn],
+                    taskname = taskname,
+                    fn = fn,
+                    deps = deps,
+                    provides = self.rqdata.dataCaches[mc].fn_provides[taskfn],
+                    taskhash = self.rqdata.runtaskentries[revdep].hash,
+                    unihash = self.rqdata.runtaskentries[revdep].unihash,
+                    hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn],
+                    taskhash_deps = self.rqdata.runtaskentries[revdep].taskhash_deps,
+                )
                 for revdep2 in deps:
                     if revdep2 not in taskdepdata:
                         additional.append(revdep2)
@@ -2964,14 +3036,13 @@ def build_scenequeue_data(sqdata, rqdata, sqrq):
     rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
 
     # Sanity check all dependencies could be changed to setscene task references
-    for taskcounter, tid in enumerate(rqdata.runtaskentries):
+    for tid in rqdata.runtaskentries:
        if tid in rqdata.runq_setscene_tids:
            pass
        elif sq_revdeps_squash[tid]:
            bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, halting. Please report this problem.")
        else:
            del sq_revdeps_squash[tid]
-        rqdata.init_progress_reporter.update(taskcounter)
 
     rqdata.init_progress_reporter.next_stage()
 
@@ -3261,7 +3332,7 @@ class runQueuePipe():
 
         start = len(self.queue)
         try:
-            self.queue.extend(self.input.read(102400) or b"")
+            self.queue.extend(self.input.read(512 * 1024) or b"")
         except (OSError, IOError) as e:
             if e.errno != errno.EAGAIN:
                 raise
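The per-call read size grows from 100 kB (102400 bytes) to 512 kB. For context, a standalone sketch of the non-blocking read pattern runQueuePipe.read() follows, where EAGAIN simply means "no data yet"; the pipe setup here is for illustration only.

import errno
import fcntl
import os

r, w = os.pipe()
flags = fcntl.fcntl(r, fcntl.F_GETFL)
fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)

queue = bytearray()
try:
    # Empty non-blocking pipe: raises BlockingIOError (errno EAGAIN), which is ignored
    queue.extend(os.read(r, 512 * 1024) or b"")
except (OSError, IOError) as e:
    if e.errno != errno.EAGAIN:
        raise

os.write(w, b"hello")
queue.extend(os.read(r, 512 * 1024) or b"")
print(bytes(queue))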