author     Patrick Ohly <patrick.ohly@intel.com>                2017-01-19 20:35:16 +0100
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2017-01-20 11:52:52 +0000
commit     bf0ea57e4a5dc5f47b2fb7623c3cea9a3ae7e4cb
tree       586665013b75330b9638d242ba0b4b4d39ef7fb6 /bitbake/lib
parent     633697cce651ddb22c18083087531dce333c752c
download   poky-bf0ea57e4a5dc5f47b2fb7623c3cea9a3ae7e4cb.tar.gz
bitbake: runqueue.py: revised completion scheduler
The idea is that tasks which complete building a recipe (like
do_package_qa) are more important than tasks which start building new
recipes (do_fetch) or those which increase disk usage
(do_compile). Therefore tasks get ordered like this (most important
first, do_rm_work before do_build because the enhanced rm_work.bbclass
was used):
1. ID /work/poky/meta/recipes-support/popt/popt_1.16.bb:do_build
2. ID /work/poky/meta/recipes-core/readline/readline_6.3.bb:do_build
3. ID /work/poky/meta/recipes-connectivity/libnss-mdns/libnss-mdns_0.10.bb:do_build
...
464. ID /work/poky/meta/recipes-sato/images/core-image-sato.bb:do_build
465. ID /work/poky/meta/recipes-graphics/xorg-proto/inputproto_2.3.2.bb:do_rm_work
466. ID /work/poky/meta/recipes-devtools/python/python3_3.5.2.bb:do_rm_work
467. ID /work/poky/meta/recipes-core/packagegroups/packagegroup-base.bb:do_rm_work
...
3620. ID virtual:native:/work/poky/meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb:do_install
3621. ID /work/poky/meta/recipes-devtools/qemu/qemu-helper-native_1.0.bb:do_install
3622. ID /work/poky/meta/recipes-core/zlib/zlib_1.2.8.bb:do_compile_ptest_base
3623. ID /work/poky/meta/recipes-extended/bzip2/bzip2_1.0.6.bb:do_compile_ptest_base
...
3645. ID /work/poky/meta/recipes-support/libevent/libevent_2.0.22.bb:do_compile_ptest_base
3646. ID /work/poky/meta/recipes-core/busybox/busybox_1.24.1.bb:do_compile_ptest_base
3647. ID /work/poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb:do_uboot_mkimage
3648. ID /work/poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb:do_sizecheck
3649. ID /work/poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb:do_strip
3650. ID /work/poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb:do_compile_kernelmodules
3651. ID /work/poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb:do_shared_workdir
3652. ID /work/poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb:do_kernel_link_images
3653. ID /work/poky/meta/recipes-devtools/quilt/quilt-native_0.64.bb:do_compile
3654. ID /work/poky/meta/recipes-extended/texinfo-dummy-native/texinfo-dummy-native.bb:do_compile
...
The order of the same task between different recipes is the same as
with the speed scheduler, i.e. more important recipes come first.
(Bitbake rev: 70e297e5c285ce0a02e9efd3117ff62cdc77ec12)
Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
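
[Editor's illustration, not part of the commit: the regrouping described above can be sketched as a small standalone Python function. The name reorder_for_completion, the shortened task IDs and the hard-coded task-kind list are invented for this example; BitBake itself derives both from the priority map, as the diff below shows.]

# Minimal sketch: given a priority map of "recipe:task" IDs (most important
# first, as produced by the speed scheduler) and a list of task kinds ordered
# from "completes a recipe" to "starts a recipe", pull each kind to the front
# of the queue while preserving the relative recipe order within a kind.
def reorder_for_completion(prio_map, task_kinds):
    reordered = list(prio_map)
    task_index = 0
    for kind in task_kinds:
        for index in range(task_index, len(reordered)):
            taskid = reordered[index]
            if taskid.rsplit(':', 1)[1] == kind:
                # Move the match up to the head block for this kind.
                del reordered[index]
                reordered.insert(task_index, taskid)
                task_index += 1
    return reordered

# Hypothetical, shortened task IDs for illustration.
prio_map = [
    'popt.bb:do_fetch', 'popt.bb:do_compile', 'popt.bb:do_build',
    'readline.bb:do_fetch', 'readline.bb:do_compile', 'readline.bb:do_build',
]
kinds = ['do_build', 'do_rm_work', 'do_compile', 'do_fetch']
print(reorder_for_completion(prio_map, kinds))
# -> do_build for both recipes first (popt before readline), then do_compile
#    for both, then do_fetch for both.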
Diffstat (limited to 'bitbake/lib')
-rw-r--r--  bitbake/lib/bb/runqueue.py | 125
1 file changed, 101 insertions(+), 24 deletions(-)
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 7e651a45d7..728b5fbb28 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -184,6 +184,18 @@ class RunQueueScheduler(object):
     def newbuilable(self, task):
         self.buildable.append(task)
 
+    def describe_task(self, taskid):
+        result = 'ID %s' % taskid
+        if self.rev_prio_map:
+            result = result + (' pri %d' % self.rev_prio_map[taskid])
+        return result
+
+    def dump_prio(self, comment):
+        bb.debug(3, '%s (most important first):\n%s' %
+                 (comment,
+                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
+                             index, taskid in enumerate(self.prio_map)])))
+
 class RunQueueSchedulerSpeed(RunQueueScheduler):
     """
     A scheduler optimised for speed. The priority map is sorted by task weight,
@@ -213,35 +225,100 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
 
 class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
     """
-    A scheduler optimised to complete .bb files are quickly as possible. The
+    A scheduler optimised to complete .bb files as quickly as possible. The
     priority map is sorted by task weight, but then reordered so once a given
-    .bb file starts to build, it's completed as quickly as possible. This works
-    well where disk space is at a premium and classes like OE's rm_work are in
-    force.
+    .bb file starts to build, it's completed as quickly as possible by
+    running all tasks related to the same .bb file one after the after.
+    This works well where disk space is at a premium and classes like OE's
+    rm_work are in force.
     """
     name = "completion"
 
     def __init__(self, runqueue, rqdata):
-        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)
+        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)
 
-        #FIXME - whilst this groups all fns together it does not reorder the
-        #fn groups optimally.
-
-        basemap = copy.deepcopy(self.prio_map)
-        self.prio_map = []
-        while (len(basemap) > 0):
-            entry = basemap.pop(0)
-            self.prio_map.append(entry)
-            fn = fn_from_tid(entry)
-            todel = []
-            for entry in basemap:
-                entry_fn = fn_from_tid(entry)
-                if entry_fn == fn:
-                    todel.append(basemap.index(entry))
-                    self.prio_map.append(entry)
-            todel.reverse()
-            for idx in todel:
-                del basemap[idx]
+        # Extract list of tasks for each recipe, with tasks sorted
+        # ascending from "must run first" (typically do_fetch) to
+        # "runs last" (do_build). The speed scheduler prioritizes
+        # tasks that must run first before the ones that run later;
+        # this is what we depend on here.
+        task_lists = {}
+        for taskid in self.prio_map:
+            fn, taskname = taskid.rsplit(':', 1)
+            task_lists.setdefault(fn, []).append(taskname)
+
+        # Now unify the different task lists. The strategy is that
+        # common tasks get skipped and new ones get inserted after the
+        # preceeding common one(s) as they are found. Because task
+        # lists should differ only by their number of tasks, but not
+        # the ordering of the common tasks, this should result in a
+        # deterministic result that is a superset of the individual
+        # task ordering.
+        all_tasks = []
+        for recipe, new_tasks in task_lists.items():
+            index = 0
+            old_task = all_tasks[index] if index < len(all_tasks) else None
+            for new_task in new_tasks:
+                if old_task == new_task:
+                    # Common task, skip it. This is the fast-path which
+                    # avoids a full search.
+                    index += 1
+                    old_task = all_tasks[index] if index < len(all_tasks) else None
+                else:
+                    try:
+                        index = all_tasks.index(new_task)
+                        # Already present, just not at the current
+                        # place. We re-synchronized by changing the
+                        # index so that it matches again. Now
+                        # move on to the next existing task.
+                        index += 1
+                        old_task = all_tasks[index] if index < len(all_tasks) else None
+                    except ValueError:
+                        # Not present. Insert before old_task, which
+                        # remains the same (but gets shifted back).
+                        all_tasks.insert(index, new_task)
+                        index += 1
+        bb.debug(3, 'merged task list: %s' % all_tasks)
+
+        # Now reverse the order so that tasks that finish the work on one
+        # recipe are considered more imporant (= come first). The ordering
+        # is now so that do_build is most important.
+        all_tasks.reverse()
+
+        # Group tasks of the same kind before tasks of less important
+        # kinds at the head of the queue (because earlier = lower
+        # priority number = runs earlier), while preserving the
+        # ordering by recipe. If recipe foo is more important than
+        # bar, then the goal is to work on foo's do_populate_sysroot
+        # before bar's do_populate_sysroot and on the more important
+        # tasks of foo before any of the less important tasks in any
+        # other recipe (if those other recipes are more important than
+        # foo).
+        #
+        # All of this only applies when tasks are runable. Explicit
+        # dependencies still override this ordering by priority.
+        #
+        # Here's an example why this priority re-ordering helps with
+        # minimizing disk usage. Consider a recipe foo with a higher
+        # priority than bar where foo DEPENDS on bar. Then the
+        # implicit rule (from base.bbclass) is that foo's do_configure
+        # depends on bar's do_populate_sysroot. This ensures that
+        # bar's do_populate_sysroot gets done first. Normally the
+        # tasks from foo would continue to run once that is done, and
+        # bar only gets completed and cleaned up later. By ordering
+        # bar's task that depend on bar's do_populate_sysroot before foo's
+        # do_configure, that problem gets avoided.
+        task_index = 0
+        self.dump_prio('original priorities')
+        for task in all_tasks:
+            for index in range(task_index, self.numTasks):
+                taskid = self.prio_map[index]
+                taskname = taskid.rsplit(':', 1)[1]
+                if taskname == task:
+                    del self.prio_map[index]
+                    self.prio_map.insert(task_index, taskid)
+                    task_index += 1
+        self.dump_prio('completion priorities')
 
 class RunTaskEntry(object):
     def __init__(self):
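
[Editor's illustration, not part of the commit: the task-list unification step added above can be demonstrated standalone with toy input. merge_task_lists and the example recipes/tasks are invented for this sketch and simplify what the scheduler does with self.prio_map.]

# Merge per-recipe task sequences into one ordering that is a superset of each
# individual sequence, following the same strategy as the new scheduler code.
def merge_task_lists(task_lists):
    all_tasks = []
    for new_tasks in task_lists.values():
        index = 0
        old_task = all_tasks[index] if index < len(all_tasks) else None
        for new_task in new_tasks:
            if old_task == new_task:
                # Common task: advance past it.
                index += 1
                old_task = all_tasks[index] if index < len(all_tasks) else None
            else:
                try:
                    # Already known, just further ahead: re-synchronize.
                    index = all_tasks.index(new_task) + 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                except ValueError:
                    # Unknown task: insert it before the current position.
                    all_tasks.insert(index, new_task)
                    index += 1
    return all_tasks

# Hypothetical input: both recipes run fetch/compile/install/build, one of them
# additionally runs a ptest task between compile and install.
print(merge_task_lists({
    'zlib.bb': ['do_fetch', 'do_compile', 'do_compile_ptest_base', 'do_install', 'do_build'],
    'popt.bb': ['do_fetch', 'do_compile', 'do_install', 'do_build'],
}))
# -> ['do_fetch', 'do_compile', 'do_compile_ptest_base', 'do_install', 'do_build']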