Diffstat (limited to 'bitbake')
-rw-r--r--  bitbake/lib/bb/runqueue.py  125
1 file changed, 101 insertions, 24 deletions
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 7e651a45d7..728b5fbb28 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -184,6 +184,18 @@ class RunQueueScheduler(object):
     def newbuilable(self, task):
         self.buildable.append(task)
 
+    def describe_task(self, taskid):
+        result = 'ID %s' % taskid
+        if self.rev_prio_map:
+            result = result + (' pri %d' % self.rev_prio_map[taskid])
+        return result
+
+    def dump_prio(self, comment):
+        bb.debug(3, '%s (most important first):\n%s' %
+                 (comment,
+                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
+                             index, taskid in enumerate(self.prio_map)])))
+
 class RunQueueSchedulerSpeed(RunQueueScheduler):
     """
     A scheduler optimised for speed. The priority map is sorted by task weight,
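As a rough illustration of what the new debug helpers print, here is a minimal standalone sketch. The task IDs are made up, rev_prio_map is assumed to map a task ID to its position in prio_map, and plain print() stands in for bb.debug():

    # Hypothetical task IDs of the form "<recipe>:<taskname>".
    prio_map = ['foo.bb:do_build', 'bar.bb:do_build']
    rev_prio_map = {tid: idx for idx, tid in enumerate(prio_map)}

    def describe_task(taskid):
        result = 'ID %s' % taskid
        if rev_prio_map:
            result = result + (' pri %d' % rev_prio_map[taskid])
        return result

    def dump_prio(comment):
        # print() stands in for bb.debug(3, ...)
        print('%s (most important first):\n%s' %
              (comment,
               '\n'.join(['%d. %s' % (index + 1, describe_task(taskid)) for
                          index, taskid in enumerate(prio_map)])))

    dump_prio('completion priorities')
    # completion priorities (most important first):
    # 1. ID foo.bb:do_build pri 0
    # 2. ID bar.bb:do_build pri 1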
@@ -213,35 +225,100 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
 
 class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
     """
-    A scheduler optimised to complete .bb files are quickly as possible. The
+    A scheduler optimised to complete .bb files as quickly as possible. The
     priority map is sorted by task weight, but then reordered so once a given
-    .bb file starts to build, it's completed as quickly as possible. This works
-    well where disk space is at a premium and classes like OE's rm_work are in
-    force.
+    .bb file starts to build, it's completed as quickly as possible by
+    running all tasks related to the same .bb file one after the other.
+    This works well where disk space is at a premium and classes like OE's
+    rm_work are in force.
     """
     name = "completion"
 
     def __init__(self, runqueue, rqdata):
-        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)
+        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)
 
-        #FIXME - whilst this groups all fns together it does not reorder the
-        #fn groups optimally.
-
-        basemap = copy.deepcopy(self.prio_map)
-        self.prio_map = []
-        while (len(basemap) > 0):
-            entry = basemap.pop(0)
-            self.prio_map.append(entry)
-            fn = fn_from_tid(entry)
-            todel = []
-            for entry in basemap:
-                entry_fn = fn_from_tid(entry)
-                if entry_fn == fn:
-                    todel.append(basemap.index(entry))
-                    self.prio_map.append(entry)
-            todel.reverse()
-            for idx in todel:
-                del basemap[idx]
+        # Extract the list of tasks for each recipe, with tasks sorted
+        # ascending from "must run first" (typically do_fetch) to
+        # "runs last" (do_build). The speed scheduler prioritizes
+        # tasks that must run first before the ones that run later;
+        # this is what we depend on here.
+        task_lists = {}
+        for taskid in self.prio_map:
+            fn, taskname = taskid.rsplit(':', 1)
+            task_lists.setdefault(fn, []).append(taskname)
+
+        # Now unify the different task lists. The strategy is that
+        # common tasks get skipped and new ones get inserted after the
+        # preceding common one(s) as they are found. Because task
+        # lists should differ only by their number of tasks, but not
+        # by the ordering of the common tasks, this should result in a
+        # deterministic result that is a superset of the individual
+        # task orderings.
+        all_tasks = []
+        for recipe, new_tasks in task_lists.items():
+            index = 0
+            old_task = all_tasks[index] if index < len(all_tasks) else None
+            for new_task in new_tasks:
+                if old_task == new_task:
+                    # Common task, skip it. This is the fast path which
+                    # avoids a full search.
+                    index += 1
+                    old_task = all_tasks[index] if index < len(all_tasks) else None
+                else:
+                    try:
+                        index = all_tasks.index(new_task)
+                        # Already present, just not at the current
+                        # place. We re-synchronize by changing the
+                        # index so that it matches again. Now
+                        # move on to the next existing task.
+                        index += 1
+                        old_task = all_tasks[index] if index < len(all_tasks) else None
+                    except ValueError:
+                        # Not present. Insert before old_task, which
+                        # remains the same (but gets shifted back).
+                        all_tasks.insert(index, new_task)
+                        index += 1
+        bb.debug(3, 'merged task list: %s' % all_tasks)
+
+        # Now reverse the order so that tasks that finish the work on one
+        # recipe are considered more important (= come first). The ordering
+        # is now such that do_build is most important.
+        all_tasks.reverse()
+
+        # Group tasks of the same kind before tasks of less important
+        # kinds at the head of the queue (because earlier = lower
+        # priority number = runs earlier), while preserving the
+        # ordering by recipe. If recipe foo is more important than
+        # bar, then the goal is to work on foo's do_populate_sysroot
+        # before bar's do_populate_sysroot and on the more important
+        # tasks of foo before any of the less important tasks in any
+        # other recipe (if those other recipes are more important than
+        # foo).
+        #
+        # All of this only applies when tasks are runnable. Explicit
+        # dependencies still override this ordering by priority.
+        #
+        # Here's an example of why this priority re-ordering helps with
+        # minimizing disk usage. Consider a recipe foo with a higher
+        # priority than bar where foo DEPENDS on bar. Then the
+        # implicit rule (from base.bbclass) is that foo's do_configure
+        # depends on bar's do_populate_sysroot. This ensures that
+        # bar's do_populate_sysroot gets done first. Normally the
+        # tasks from foo would continue to run once that is done, and
+        # bar only gets completed and cleaned up later. By ordering
+        # bar's tasks that depend on bar's do_populate_sysroot before foo's
+        # do_configure, that problem gets avoided.
+        task_index = 0
+        self.dump_prio('original priorities')
+        for task in all_tasks:
+            for index in range(task_index, self.numTasks):
+                taskid = self.prio_map[index]
+                taskname = taskid.rsplit(':', 1)[1]
+                if taskname == task:
+                    del self.prio_map[index]
+                    self.prio_map.insert(task_index, taskid)
+                    task_index += 1
+        self.dump_prio('completion priorities')
 
 class RunTaskEntry(object):
     def __init__(self):
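To make the unification step concrete, here is a standalone sketch that runs the same merge logic on two hypothetical per-recipe task lists. The recipe and task names are invented; in the scheduler the lists come from self.prio_map:

    task_lists = {
        'foo.bb': ['do_fetch', 'do_compile', 'do_install', 'do_build'],
        'bar.bb': ['do_fetch', 'do_patch', 'do_compile', 'do_build'],
    }

    all_tasks = []
    for recipe, new_tasks in task_lists.items():
        index = 0
        old_task = all_tasks[index] if index < len(all_tasks) else None
        for new_task in new_tasks:
            if old_task == new_task:
                # Common task: advance past it (fast path, no search).
                index += 1
                old_task = all_tasks[index] if index < len(all_tasks) else None
            else:
                try:
                    # Already present elsewhere: re-synchronize there.
                    index = all_tasks.index(new_task) + 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                except ValueError:
                    # Not present: insert before old_task, which stays put.
                    all_tasks.insert(index, new_task)
                    index += 1

    print(all_tasks)
    # ['do_fetch', 'do_patch', 'do_compile', 'do_install', 'do_build']

The extra do_patch of bar.bb is slotted in after the common do_fetch, so the merged list is a superset that respects both individual orderings.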
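And a similar sketch of the final regrouping pass: after reversing the merged list so that do_build comes first, tasks are pulled to the front of prio_map kind by kind while keeping the recipe order within each kind. Again the task IDs are hypothetical, and plain variables stand in for the scheduler's self.prio_map and self.numTasks:

    all_tasks = ['do_fetch', 'do_compile', 'do_build']
    all_tasks.reverse()  # do_build first: finishing a recipe is most important

    prio_map = [
        'foo.bb:do_fetch', 'foo.bb:do_compile', 'foo.bb:do_build',
        'bar.bb:do_fetch', 'bar.bb:do_compile', 'bar.bb:do_build',
    ]
    numTasks = len(prio_map)

    task_index = 0
    for task in all_tasks:
        for index in range(task_index, numTasks):
            taskid = prio_map[index]
            taskname = taskid.rsplit(':', 1)[1]
            if taskname == task:
                # Move this task up to the current head of the queue.
                del prio_map[index]
                prio_map.insert(task_index, taskid)
                task_index += 1

    print(prio_map)
    # ['foo.bb:do_build', 'bar.bb:do_build',
    #  'foo.bb:do_compile', 'bar.bb:do_compile',
    #  'foo.bb:do_fetch', 'bar.bb:do_fetch']

All do_build tasks end up ahead of all do_compile tasks, and foo.bb stays ahead of bar.bb within each kind; actual task dependencies still gate when each task may run.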