author     Richard Purdie <richard@openedhand.com>    2008-01-06 16:51:51 +0000
committer  Richard Purdie <richard@openedhand.com>    2008-01-06 16:51:51 +0000
commit     7821f2250d16e24097d973c34197db7fd60bc51a (patch)
tree       a65ca61381af497265f02b5363196b36fe1b0150 /bitbake/lib/bb/runqueue.py
parent     c7fca99aab060efe14332ce6d913bc7d346a4478 (diff)
download   poky-7821f2250d16e24097d973c34197db7fd60bc51a.tar.gz
bitbake: Sync with bitbake upstream for various fixes
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@3411 311d38ba-8fff-0310-9ca6-ca027cbcb966
Diffstat (limited to 'bitbake/lib/bb/runqueue.py')
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 399 |
1 files changed, 305 insertions, 94 deletions
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 2f80dd4c88..68ef3a722f 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -51,6 +51,88 @@ class RunQueueStats: | |||
51 | def taskSkipped(self): | 51 | def taskSkipped(self): |
52 | self.skipped = self.skipped + 1 | 52 | self.skipped = self.skipped + 1 |
53 | 53 | ||
54 | class RunQueueScheduler: | ||
55 | """ | ||
56 | Control the order tasks are scheduled in. | ||
57 | """ | ||
58 | def __init__(self, runqueue): | ||
59 | """ | ||
60 | The default scheduler just returns the first buildable task (the | ||
61 | priority map is sorted by task number) | ||
62 | """ | ||
63 | self.rq = runqueue | ||
64 | numTasks = len(self.rq.runq_fnid) | ||
65 | |||
66 | self.prio_map = [] | ||
67 | self.prio_map.extend(range(numTasks)) | ||
68 | |||
69 | def next(self): | ||
70 | """ | ||
71 | Return the id of the first task we find that is buildable | ||
72 | """ | ||
73 | for task1 in range(len(self.rq.runq_fnid)): | ||
74 | task = self.prio_map[task1] | ||
75 | if self.rq.runq_running[task] == 1: | ||
76 | continue | ||
77 | if self.rq.runq_buildable[task] == 1: | ||
78 | return task | ||
79 | |||
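The default scheduler simply walks prio_map (which starts out as the task indices in order) and hands back the first task that is buildable and not already running. A minimal illustration of that contract, using a hypothetical stand-in object rather than a real RunQueue:

    class FakeRunQueue:
        # Hypothetical stand-in carrying only the lists the scheduler reads.
        def __init__(self):
            self.runq_fnid = [0, 0, 1]       # three tasks
            self.runq_running = [1, 0, 0]    # task 0 is already running
            self.runq_buildable = [1, 0, 1]  # tasks 0 and 2 have all deps met

    sched = RunQueueScheduler(FakeRunQueue())
    print(sched.next())  # -> 2: the first task that is buildable but not running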
80 | class RunQueueSchedulerSpeed(RunQueueScheduler): | ||
81 | """ | ||
82 | A scheduler optimised for speed. The priority map is sorted by task weight; | ||
83 | heavier weighted tasks (tasks needed by the most other tasks) are run first. | ||
84 | """ | ||
85 | def __init__(self, runqueue): | ||
86 | """ | ||
87 | The priority map is sorted by task weight. | ||
88 | """ | ||
89 | from copy import deepcopy | ||
90 | |||
91 | self.rq = runqueue | ||
92 | |||
93 | sortweight = deepcopy(self.rq.runq_weight) | ||
94 | sortweight.sort() | ||
95 | copyweight = deepcopy(self.rq.runq_weight) | ||
96 | self.prio_map = [] | ||
97 | |||
98 | for weight in sortweight: | ||
99 | idx = copyweight.index(weight) | ||
100 | self.prio_map.append(idx) | ||
101 | copyweight[idx] = -1 | ||
102 | |||
103 | self.prio_map.reverse() | ||
104 | |||
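The loop above builds prio_map in ascending weight order and then reverses it, so the heaviest tasks come first. Purely as an illustration (and ignoring how tasks of equal weight are tie-broken), the same ordering can be expressed with a key-based sort:

    weights = [3, 1, 7, 3]   # toy runq_weight values
    prio_map = sorted(range(len(weights)), key=lambda i: weights[i], reverse=True)
    # -> [2, 0, 3, 1]: the weight-7 task runs first, the weight-1 task last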
105 | class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed): | ||
106 | """ | ||
107 | A scheduler optimised to complete .bb files as quickly as possible. The | ||
108 | priority map is sorted by task weight, but then reordered so once a given | ||
109 | .bb file starts to build, it's completed as quickly as possible. This works | ||
110 | well where disk space is at a premium and classes like OE's rm_work are in | ||
111 | force. | ||
112 | """ | ||
113 | def __init__(self, runqueue): | ||
114 | RunQueueSchedulerSpeed.__init__(self, runqueue) | ||
115 | from copy import deepcopy | ||
116 | |||
117 | #FIXME - whilst this groups all fnids together it does not reorder the | ||
118 | #fnid groups optimally. | ||
119 | |||
120 | basemap = deepcopy(self.prio_map) | ||
121 | self.prio_map = [] | ||
122 | while (len(basemap) > 0): | ||
123 | entry = basemap.pop(0) | ||
124 | self.prio_map.append(entry) | ||
125 | fnid = self.rq.runq_fnid[entry] | ||
126 | todel = [] | ||
127 | for entry in basemap: | ||
128 | entry_fnid = self.rq.runq_fnid[entry] | ||
129 | if entry_fnid == fnid: | ||
130 | todel.append(basemap.index(entry)) | ||
131 | self.prio_map.append(entry) | ||
132 | todel.reverse() | ||
133 | for idx in todel: | ||
134 | del basemap[idx] | ||
135 | |||
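The effect of this reordering is to keep the weight ordering between groups while pulling every remaining task from the same recipe (same fnid) in immediately behind the first task of that recipe. A toy illustration of the grouping step, not using the real runqueue structures:

    basemap = [4, 0, 2, 1, 3]                         # weight-sorted priority map
    fnids = ['a.bb', 'a.bb', 'b.bb', 'b.bb', 'a.bb']  # runq_fnid per task id
    prio_map = []
    while basemap:
        entry = basemap.pop(0)
        prio_map.append(entry)
        # pull forward every queued task from the same .bb file
        prio_map.extend(t for t in basemap if fnids[t] == fnids[entry])
        basemap = [t for t in basemap if fnids[t] != fnids[entry]]
    # prio_map -> [4, 0, 1, 2, 3]: tasks 0 and 1 (a.bb) now follow task 4 (a.bb)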
54 | class RunQueue: | 136 | class RunQueue: |
55 | """ | 137 | """ |
56 | BitBake Run Queue implementation | 138 | BitBake Run Queue implementation |
@@ -71,14 +153,158 @@ class RunQueue: | |||
71 | self.runq_task = [] | 153 | self.runq_task = [] |
72 | self.runq_depends = [] | 154 | self.runq_depends = [] |
73 | self.runq_revdeps = [] | 155 | self.runq_revdeps = [] |
74 | self.runq_weight = [] | ||
75 | self.prio_map = [] | ||
76 | 156 | ||
77 | def get_user_idstring(self, task): | 157 | def get_user_idstring(self, task): |
78 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 158 | fn = self.taskData.fn_index[self.runq_fnid[task]] |
79 | taskname = self.runq_task[task] | 159 | taskname = self.runq_task[task] |
80 | return "%s, %s" % (fn, taskname) | 160 | return "%s, %s" % (fn, taskname) |
81 | 161 | ||
162 | def circular_depchains_handler(self, tasks): | ||
163 | """ | ||
164 | Some tasks aren't buildable, likely due to circular dependency issues. | ||
165 | Identify the circular dependencies and print them in a user readable format. | ||
166 | """ | ||
167 | from copy import deepcopy | ||
168 | |||
169 | valid_chains = [] | ||
170 | explored_deps = {} | ||
171 | msgs = [] | ||
172 | |||
173 | def chain_reorder(chain): | ||
174 | """ | ||
175 | Reorder a dependency chain so the lowest task id is first | ||
176 | """ | ||
177 | lowest = 0 | ||
178 | new_chain = [] | ||
179 | for entry in range(len(chain)): | ||
180 | if chain[entry] < chain[lowest]: | ||
181 | lowest = entry | ||
182 | new_chain.extend(chain[lowest:]) | ||
183 | new_chain.extend(chain[:lowest]) | ||
184 | return new_chain | ||
185 | |||
186 | def chain_compare_equal(chain1, chain2): | ||
187 | """ | ||
188 | Compare two dependency chains and see if they're the same | ||
189 | """ | ||
190 | if len(chain1) != len(chain2): | ||
191 | return False | ||
192 | for index in range(len(chain1)): | ||
193 | if chain1[index] != chain2[index]: | ||
194 | return False | ||
195 | return True | ||
196 | |||
197 | def chain_array_contains(chain, chain_array): | ||
198 | """ | ||
199 | Return True if chain_array contains chain | ||
200 | """ | ||
201 | for ch in chain_array: | ||
202 | if chain_compare_equal(ch, chain): | ||
203 | return True | ||
204 | return False | ||
205 | |||
206 | def find_chains(taskid, prev_chain): | ||
207 | prev_chain.append(taskid) | ||
208 | total_deps = [] | ||
209 | total_deps.extend(self.runq_revdeps[taskid]) | ||
210 | for revdep in self.runq_revdeps[taskid]: | ||
211 | if revdep in prev_chain: | ||
212 | idx = prev_chain.index(revdep) | ||
213 | # To prevent duplicates, reorder the chain to start with the lowest taskid | ||
214 | # and search through an array of those we've already printed | ||
215 | chain = prev_chain[idx:] | ||
216 | new_chain = chain_reorder(chain) | ||
217 | if not chain_array_contains(new_chain, valid_chains): | ||
218 | valid_chains.append(new_chain) | ||
219 | msgs.append("Dependency loop #%d found:\n" % len(valid_chains)) | ||
220 | for dep in new_chain: | ||
221 | msgs.append(" Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep])) | ||
222 | msgs.append("\n") | ||
223 | if len(valid_chains) > 10: | ||
224 | msgs.append("Aborted dependency loops search after 10 matches.\n") | ||
225 | return msgs | ||
226 | continue | ||
227 | scan = False | ||
228 | if revdep not in explored_deps: | ||
229 | scan = True | ||
230 | elif revdep in explored_deps[revdep]: | ||
231 | scan = True | ||
232 | else: | ||
233 | for dep in prev_chain: | ||
234 | if dep in explored_deps[revdep]: | ||
235 | scan = True | ||
236 | if scan: | ||
237 | find_chains(revdep, deepcopy(prev_chain)) | ||
238 | for dep in explored_deps[revdep]: | ||
239 | if dep not in total_deps: | ||
240 | total_deps.append(dep) | ||
241 | |||
242 | explored_deps[taskid] = total_deps | ||
243 | |||
244 | for task in tasks: | ||
245 | find_chains(task, []) | ||
246 | |||
247 | return msgs | ||
248 | |||
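The key trick above is that chain_reorder canonicalises each loop so the same cycle, discovered from different starting tasks, is only reported once. Conceptually (chain_reorder is a local helper inside the handler, so this is illustrative only):

    # every rotation of the loop 2 -> 5 -> 7 -> 2 collapses to one form
    chain_reorder([7, 2, 5])   # -> [2, 5, 7]
    chain_reorder([5, 7, 2])   # -> [2, 5, 7]
    # chain_array_contains() then filters out chains already in valid_chains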
249 | def calculate_task_weights(self, endpoints): | ||
250 | """ | ||
251 | Calculate a number representing the "weight" of each task. Heavier weighted tasks | ||
252 | have more tasks depending on them and hence should be executed sooner for maximum speed. | ||
253 | |||
254 | This function also sanity checks the task list, finding tasks that it is not | ||
255 | possible to execute due to circular dependencies. | ||
256 | """ | ||
257 | |||
258 | numTasks = len(self.runq_fnid) | ||
259 | weight = [] | ||
260 | deps_left = [] | ||
261 | task_done = [] | ||
262 | |||
263 | for listid in range(numTasks): | ||
264 | task_done.append(False) | ||
265 | weight.append(0) | ||
266 | deps_left.append(len(self.runq_revdeps[listid])) | ||
267 | |||
268 | for listid in endpoints: | ||
269 | weight[listid] = 1 | ||
270 | task_done[listid] = True | ||
271 | |||
272 | while 1: | ||
273 | next_points = [] | ||
274 | for listid in endpoints: | ||
275 | for revdep in self.runq_depends[listid]: | ||
276 | weight[revdep] = weight[revdep] + weight[listid] | ||
277 | deps_left[revdep] = deps_left[revdep] - 1 | ||
278 | if deps_left[revdep] == 0: | ||
279 | next_points.append(revdep) | ||
280 | task_done[revdep] = True | ||
281 | endpoints = next_points | ||
282 | if len(next_points) == 0: | ||
283 | break | ||
284 | |||
285 | # Circular dependency sanity check | ||
286 | problem_tasks = [] | ||
287 | for task in range(numTasks): | ||
288 | if task_done[task] is False or deps_left[task] != 0: | ||
289 | problem_tasks.append(task) | ||
290 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task))) | ||
291 | bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task])) | ||
292 | |||
293 | if problem_tasks: | ||
294 | message = "Unbuildable tasks were found.\n" | ||
295 | message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n" | ||
296 | message = message + "Identifying dependency loops (this may take a short while)...\n" | ||
297 | bb.msg.error(bb.msg.domain.RunQueue, message) | ||
298 | |||
299 | msgs = self.circular_depchains_handler(problem_tasks) | ||
300 | |||
301 | message = "\n" | ||
302 | for msg in msgs: | ||
303 | message = message + msg | ||
304 | bb.msg.fatal(bb.msg.domain.RunQueue, message) | ||
305 | |||
306 | return weight | ||
307 | |||
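As a hand-worked sketch of the propagation (toy task names rather than the real list-index based structures): endpoints are tasks nothing else depends on; they seed weight 1 and push their weight back into whatever they depend on, one "layer" of the graph per pass.

    # B and C depend on A; D depends on B and C (a diamond)
    depends = {'A': set(), 'B': {'A'}, 'C': {'A'}, 'D': {'B', 'C'}}
    revdeps = {'A': {'B', 'C'}, 'B': {'D'}, 'C': {'D'}, 'D': set()}
    weight = dict.fromkeys(depends, 0)
    deps_left = dict((t, len(revdeps[t])) for t in depends)
    endpoints = [t for t in depends if not revdeps[t]]   # -> ['D']
    for t in endpoints:
        weight[t] = 1
    while endpoints:
        next_points = []
        for t in endpoints:
            for dep in depends[t]:
                weight[dep] += weight[t]
                deps_left[dep] -= 1
                if deps_left[dep] == 0:
                    next_points.append(dep)
        endpoints = next_points
    # weight -> {'A': 2, 'B': 1, 'C': 1, 'D': 1}: the shared root A is heaviest,
    # so RunQueueSchedulerSpeed will run it first.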
82 | def prepare_runqueue(self): | 308 | def prepare_runqueue(self): |
83 | """ | 309 | """ |
84 | Turn a set of taskData into a RunQueue and compute data needed | 310 | Turn a set of taskData into a RunQueue and compute data needed |
@@ -86,9 +312,7 @@ class RunQueue: | |||
86 | """ | 312 | """ |
87 | 313 | ||
88 | depends = [] | 314 | depends = [] |
89 | runq_weight1 = [] | ||
90 | runq_build = [] | 315 | runq_build = [] |
91 | runq_done = [] | ||
92 | 316 | ||
93 | taskData = self.taskData | 317 | taskData = self.taskData |
94 | 318 | ||
@@ -98,6 +322,17 @@ class RunQueue: | |||
98 | 322 | ||
99 | bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") | 323 | bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") |
100 | 324 | ||
325 | # Step A - Work out a list of tasks to run | ||
326 | # | ||
327 | # Taskdata gives us a list of possible providers for every target | ||
328 | # ordered by priority (build_targets, run_targets). It also gives | ||
329 | # information on each of those providers. | ||
330 | # | ||
331 | # To create the actual list of tasks to execute we fix the list of | ||
332 | # providers and then resolve the dependencies into task IDs. This | ||
333 | # process is repeated for each type of dependency (tdepends, deptask, | ||
334 | # rdeptask, recrdeptask, idepends). | ||
335 | |||
101 | for task in range(len(taskData.tasks_name)): | 336 | for task in range(len(taskData.tasks_name)): |
102 | fnid = taskData.tasks_fnid[task] | 337 | fnid = taskData.tasks_fnid[task] |
103 | fn = taskData.fn_index[fnid] | 338 | fn = taskData.fn_index[fnid] |
@@ -105,9 +340,15 @@ class RunQueue: | |||
105 | 340 | ||
106 | if fnid not in taskData.failed_fnids: | 341 | if fnid not in taskData.failed_fnids: |
107 | 342 | ||
343 | # Resolve task internal dependencies | ||
344 | # | ||
345 | # e.g. addtask before X after Y | ||
108 | depends = taskData.tasks_tdepends[task] | 346 | depends = taskData.tasks_tdepends[task] |
109 | 347 | ||
110 | # Resolve Depends | 348 | # Resolve 'deptask' dependencies |
349 | # | ||
350 | # e.g. do_sometask[deptask] = "do_someothertask" | ||
351 | # (makes sure sometask runs after someothertask of all DEPENDS) | ||
111 | if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']: | 352 | if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']: |
112 | tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() | 353 | tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() |
113 | for depid in taskData.depids[fnid]: | 354 | for depid in taskData.depids[fnid]: |
@@ -119,7 +360,10 @@ class RunQueue: | |||
119 | for taskname in tasknames: | 360 | for taskname in tasknames: |
120 | depends.append(taskData.gettask_id(dep, taskname)) | 361 | depends.append(taskData.gettask_id(dep, taskname)) |
121 | 362 | ||
122 | # Resolve Runtime Depends | 363 | # Resolve 'rdeptask' dependencies |
364 | # | ||
365 | # e.g. do_sometask[rdeptask] = "do_someothertask" | ||
366 | # (makes sure sometask runs after someothertask of all RDEPENDS) | ||
123 | if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']: | 367 | if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']: |
124 | taskname = task_deps['rdeptask'][taskData.tasks_name[task]] | 368 | taskname = task_deps['rdeptask'][taskData.tasks_name[task]] |
125 | for depid in taskData.rdepids[fnid]: | 369 | for depid in taskData.rdepids[fnid]: |
@@ -129,6 +373,10 @@ class RunQueue: | |||
129 | dep = taskData.fn_index[depdata] | 373 | dep = taskData.fn_index[depdata] |
130 | depends.append(taskData.gettask_id(dep, taskname)) | 374 | depends.append(taskData.gettask_id(dep, taskname)) |
131 | 375 | ||
376 | # Resolve inter-task dependencies | ||
377 | # | ||
378 | # e.g. do_sometask[depends] = "targetname:do_someothertask" | ||
379 | # (makes sure sometask runs after targetname's someothertask) | ||
132 | idepends = taskData.tasks_idepends[task] | 380 | idepends = taskData.tasks_idepends[task] |
133 | for idepend in idepends: | 381 | for idepend in idepends: |
134 | depid = int(idepend.split(":")[0]) | 382 | depid = int(idepend.split(":")[0]) |
@@ -207,9 +455,10 @@ class RunQueue: | |||
207 | if nextdepid not in dep_seen: | 455 | if nextdepid not in dep_seen: |
208 | add_recursive_build(nextdepid, fnid) | 456 | add_recursive_build(nextdepid, fnid) |
209 | 457 | ||
210 | 458 | # Resolve recursive 'recrdeptask' dependencies | |
211 | # Resolve Recursive Runtime Depends | 459 | # |
212 | # Also includes all thier build depends, intertask depends and runtime depends | 460 | # e.g. do_sometask[recrdeptask] = "do_someothertask" |
461 | # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) | ||
213 | if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']: | 462 | if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']: |
214 | for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split(): | 463 | for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split(): |
215 | dep_seen = [] | 464 | dep_seen = [] |
@@ -223,7 +472,7 @@ class RunQueue: | |||
223 | depid = int(idepend.split(":")[0]) | 472 | depid = int(idepend.split(":")[0]) |
224 | add_recursive_build(depid, fnid) | 473 | add_recursive_build(depid, fnid) |
225 | 474 | ||
226 | #Prune self references | 475 | # Remove all self references |
227 | if task in depends: | 476 | if task in depends: |
228 | newdep = [] | 477 | newdep = [] |
229 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) | 478 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) |
@@ -237,11 +486,14 @@ class RunQueue: | |||
237 | self.runq_task.append(taskData.tasks_name[task]) | 486 | self.runq_task.append(taskData.tasks_name[task]) |
238 | self.runq_depends.append(Set(depends)) | 487 | self.runq_depends.append(Set(depends)) |
239 | self.runq_revdeps.append(Set()) | 488 | self.runq_revdeps.append(Set()) |
240 | self.runq_weight.append(0) | ||
241 | 489 | ||
242 | runq_weight1.append(0) | ||
243 | runq_build.append(0) | 490 | runq_build.append(0) |
244 | runq_done.append(0) | 491 | |
492 | # Step B - Mark all active tasks | ||
493 | # | ||
494 | # Start with the tasks we were asked to run and mark all dependencies | ||
495 | # as active too. If the task is to be 'forced', clear its stamp. Once | ||
496 | # all active tasks are marked, prune the ones we don't need. | ||
245 | 497 | ||
246 | bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") | 498 | bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") |
247 | 499 | ||
@@ -280,11 +532,17 @@ class RunQueue: | |||
280 | if fnid in taskData.failed_fnids: | 532 | if fnid in taskData.failed_fnids: |
281 | continue | 533 | continue |
282 | 534 | ||
535 | if target[1] not in taskData.tasks_lookup[fnid]: | ||
536 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0])) | ||
537 | |||
283 | listid = taskData.tasks_lookup[fnid][target[1]] | 538 | listid = taskData.tasks_lookup[fnid][target[1]] |
284 | 539 | ||
285 | mark_active(listid, 1) | 540 | mark_active(listid, 1) |
286 | 541 | ||
287 | # Prune inactive tasks | 542 | # Step C - Prune all inactive tasks |
543 | # | ||
544 | # Once all active tasks are marked, prune the ones we don't need. | ||
545 | |||
288 | maps = [] | 546 | maps = [] |
289 | delcount = 0 | 547 | delcount = 0 |
290 | for listid in range(len(self.runq_fnid)): | 548 | for listid in range(len(self.runq_fnid)): |
@@ -294,14 +552,16 @@ class RunQueue: | |||
294 | del self.runq_fnid[listid-delcount] | 552 | del self.runq_fnid[listid-delcount] |
295 | del self.runq_task[listid-delcount] | 553 | del self.runq_task[listid-delcount] |
296 | del self.runq_depends[listid-delcount] | 554 | del self.runq_depends[listid-delcount] |
297 | del self.runq_weight[listid-delcount] | ||
298 | del runq_weight1[listid-delcount] | ||
299 | del runq_build[listid-delcount] | 555 | del runq_build[listid-delcount] |
300 | del runq_done[listid-delcount] | ||
301 | del self.runq_revdeps[listid-delcount] | 556 | del self.runq_revdeps[listid-delcount] |
302 | delcount = delcount + 1 | 557 | delcount = delcount + 1 |
303 | maps.append(-1) | 558 | maps.append(-1) |
304 | 559 | ||
560 | # | ||
561 | # Step D - Sanity checks and computation | ||
562 | # | ||
563 | |||
564 | # Check to make sure we still have tasks to run | ||
305 | if len(self.runq_fnid) == 0: | 565 | if len(self.runq_fnid) == 0: |
306 | if not taskData.abort: | 566 | if not taskData.abort: |
307 | bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.") | 567 | bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.") |
@@ -310,6 +570,8 @@ class RunQueue: | |||
310 | 570 | ||
311 | bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) | 571 | bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) |
312 | 572 | ||
573 | # Remap the dependencies to account for the deleted tasks | ||
574 | # Check we didn't delete a task we depend on | ||
313 | for listid in range(len(self.runq_fnid)): | 575 | for listid in range(len(self.runq_fnid)): |
314 | newdeps = [] | 576 | newdeps = [] |
315 | origdeps = self.runq_depends[listid] | 577 | origdeps = self.runq_depends[listid] |
@@ -321,62 +583,37 @@ class RunQueue: | |||
321 | 583 | ||
322 | bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") | 584 | bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") |
323 | 585 | ||
586 | # Generate a list of reverse dependencies to ease future calculations | ||
324 | for listid in range(len(self.runq_fnid)): | 587 | for listid in range(len(self.runq_fnid)): |
325 | for dep in self.runq_depends[listid]: | 588 | for dep in self.runq_depends[listid]: |
326 | self.runq_revdeps[dep].add(listid) | 589 | self.runq_revdeps[dep].add(listid) |
327 | 590 | ||
591 | # Identify tasks at the end of dependency chains | ||
592 | # Error on circular dependency loops (length two) | ||
328 | endpoints = [] | 593 | endpoints = [] |
329 | for listid in range(len(self.runq_fnid)): | 594 | for listid in range(len(self.runq_fnid)): |
330 | revdeps = self.runq_revdeps[listid] | 595 | revdeps = self.runq_revdeps[listid] |
331 | if len(revdeps) == 0: | 596 | if len(revdeps) == 0: |
332 | runq_done[listid] = 1 | ||
333 | self.runq_weight[listid] = 1 | ||
334 | endpoints.append(listid) | 597 | endpoints.append(listid) |
335 | for dep in revdeps: | 598 | for dep in revdeps: |
336 | if dep in self.runq_depends[listid]: | 599 | if dep in self.runq_depends[listid]: |
337 | #self.dump_data(taskData) | 600 | #self.dump_data(taskData) |
338 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) | 601 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) |
339 | runq_weight1[listid] = len(revdeps) | ||
340 | 602 | ||
341 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) | 603 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) |
342 | 604 | ||
343 | while 1: | ||
344 | next_points = [] | ||
345 | for listid in endpoints: | ||
346 | for revdep in self.runq_depends[listid]: | ||
347 | self.runq_weight[revdep] = self.runq_weight[revdep] + self.runq_weight[listid] | ||
348 | runq_weight1[revdep] = runq_weight1[revdep] - 1 | ||
349 | if runq_weight1[revdep] == 0: | ||
350 | next_points.append(revdep) | ||
351 | runq_done[revdep] = 1 | ||
352 | endpoints = next_points | ||
353 | if len(next_points) == 0: | ||
354 | break | ||
355 | 605 | ||
356 | # Sanity Checks | 606 | # Calculate task weights |
357 | for task in range(len(self.runq_fnid)): | 607 | # Check for higher length circular dependencies |
358 | if runq_done[task] == 0: | 608 | self.runq_weight = self.calculate_task_weights(endpoints) |
359 | seen = [] | 609 | |
360 | deps_seen = [] | 610 | # Decide what order to execute the tasks in, pick a scheduler |
361 | def print_chain(taskid, finish): | 611 | # FIXME - Allow user selection |
362 | seen.append(taskid) | 612 | #self.sched = RunQueueScheduler(self) |
363 | for revdep in self.runq_revdeps[taskid]: | 613 | self.sched = RunQueueSchedulerSpeed(self) |
364 | if runq_done[revdep] == 0 and revdep not in seen and not finish: | 614 | #self.sched = RunQueueSchedulerCompletion(self) |
365 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep), self.runq_depends[revdep])) | 615 | |
366 | if revdep in deps_seen: | 616 | # Sanity Check - Check for multiple tasks building the same provider |
367 | bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep))) | ||
368 | finish = True | ||
369 | return | ||
370 | for dep in self.runq_depends[revdep]: | ||
371 | deps_seen.append(dep) | ||
372 | print_chain(revdep, finish) | ||
373 | print_chain(task, False) | ||
374 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task))) | ||
375 | if runq_weight1[task] != 0: | ||
376 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task))) | ||
377 | |||
378 | |||
379 | # Check for multiple tasks building the same provider | ||
380 | prov_list = {} | 617 | prov_list = {} |
381 | seen_fn = [] | 618 | seen_fn = [] |
382 | for task in range(len(self.runq_fnid)): | 619 | for task in range(len(self.runq_fnid)): |
@@ -397,21 +634,6 @@ class RunQueue: | |||
397 | #if error: | 634 | #if error: |
398 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") | 635 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") |
399 | 636 | ||
400 | |||
401 | # Make a weight sorted map | ||
402 | from copy import deepcopy | ||
403 | |||
404 | sortweight = deepcopy(self.runq_weight) | ||
405 | sortweight.sort() | ||
406 | copyweight = deepcopy(self.runq_weight) | ||
407 | self.prio_map = [] | ||
408 | |||
409 | for weight in sortweight: | ||
410 | idx = copyweight.index(weight) | ||
411 | self.prio_map.append(idx) | ||
412 | copyweight[idx] = -1 | ||
413 | self.prio_map.reverse() | ||
414 | |||
415 | #self.dump_data(taskData) | 637 | #self.dump_data(taskData) |
416 | 638 | ||
417 | def execute_runqueue(self): | 639 | def execute_runqueue(self): |
@@ -483,18 +705,6 @@ class RunQueue: | |||
483 | taskname = self.runq_task[revdep] | 705 | taskname = self.runq_task[revdep] |
484 | bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) | 706 | bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) |
485 | 707 | ||
486 | def get_next_task(self): | ||
487 | """ | ||
488 | Return the id of the highest priority task that is buildable | ||
489 | """ | ||
490 | for task1 in range(len(self.runq_fnid)): | ||
491 | task = self.prio_map[task1] | ||
492 | if self.runq_running[task] == 1: | ||
493 | continue | ||
494 | if self.runq_buildable[task] == 1: | ||
495 | return task | ||
496 | return None | ||
497 | |||
498 | def execute_runqueue_internal(self): | 708 | def execute_runqueue_internal(self): |
499 | """ | 709 | """ |
500 | Run the tasks in a queue prepared by prepare_runqueue | 710 | Run the tasks in a queue prepared by prepare_runqueue |
@@ -511,20 +721,21 @@ class RunQueue: | |||
511 | def sigint_handler(signum, frame): | 721 | def sigint_handler(signum, frame): |
512 | raise KeyboardInterrupt | 722 | raise KeyboardInterrupt |
513 | 723 | ||
724 | # RP - this code allows tasks to run out of order - disabled, FIXME | ||
514 | # Find any tasks with current stamps and remove them from the queue | 725 | # Find any tasks with current stamps and remove them from the queue |
515 | for task1 in range(len(self.runq_fnid)): | 726 | #for task1 in range(len(self.runq_fnid)): |
516 | task = self.prio_map[task1] | 727 | # task = self.prio_map[task1] |
517 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 728 | # fn = self.taskData.fn_index[self.runq_fnid[task]] |
518 | taskname = self.runq_task[task] | 729 | # taskname = self.runq_task[task] |
519 | if bb.build.stamp_is_current(taskname, self.dataCache, fn): | 730 | # if bb.build.stamp_is_current(taskname, self.dataCache, fn): |
520 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) | 731 | # bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) |
521 | self.runq_running[task] = 1 | 732 | # self.runq_running[task] = 1 |
522 | self.task_complete(task) | 733 | # self.task_complete(task) |
523 | self.stats.taskCompleted() | 734 | # self.stats.taskCompleted() |
524 | self.stats.taskSkipped() | 735 | # self.stats.taskSkipped() |
525 | 736 | ||
526 | while True: | 737 | while True: |
527 | task = self.get_next_task() | 738 | task = self.sched.next() |
528 | if task is not None: | 739 | if task is not None: |
529 | fn = self.taskData.fn_index[self.runq_fnid[task]] | 740 | fn = self.taskData.fn_index[self.runq_fnid[task]] |
530 | 741 | ||