Diffstat (limited to 'bitbake/lib/bb/runqueue.py')
-rw-r--r-- | bitbake/lib/bb/runqueue.py | 474 |
1 file changed, 264 insertions, 210 deletions
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 6a953b844a..ce30fccd43 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -48,6 +48,31 @@ def fn_from_tid(tid): | |||
48 | def taskname_from_tid(tid): | 48 | def taskname_from_tid(tid): |
49 | return tid.rsplit(":", 1)[1] | 49 | return tid.rsplit(":", 1)[1] |
50 | 50 | ||
51 | def split_tid(tid): | ||
52 | if tid.startswith('multiconfig:'): | ||
53 | elems = tid.split(':') | ||
54 | mc = elems[1] | ||
55 | fn = ":".join(elems[2:-1]) | ||
56 | taskname = elems[-1] | ||
57 | else: | ||
58 | tid = tid.rsplit(":", 1) | ||
59 | mc = "" | ||
60 | fn = tid[0] | ||
61 | taskname = tid[1] | ||
62 | |||
63 | return (mc, fn, taskname) | ||
64 | |||
65 | def build_tid(mc, fn, taskname): | ||
66 | if mc: | ||
67 | return "multiconfig:" + mc + ":" + fn + ":" + taskname | ||
68 | return fn + ":" + taskname | ||
69 | |||
70 | def taskfn_fromtid(tid): | ||
71 | (mc, fn, taskname) = split_tid(tid) | ||
72 | if mc: | ||
73 | return "multiconfig:" + mc + ":" + fn | ||
74 | return fn | ||
75 | |||
51 | class RunQueueStats: | 76 | class RunQueueStats: |
52 | """ | 77 | """ |
53 | Holds statistics on the tasks handled by the associated runQueue | 78 | Holds statistics on the tasks handled by the associated runQueue |
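
For illustration, here is a minimal standalone copy of the three helpers added above, with a couple of assertions showing the round trip for a plain and a multiconfig task ID. The recipe paths and the "musl" configuration name are invented for the example.

    # Simplified copies of the helpers added above, usable outside BitBake.
    def split_tid(tid):
        if tid.startswith('multiconfig:'):
            elems = tid.split(':')
            mc = elems[1]
            fn = ":".join(elems[2:-1])
            taskname = elems[-1]
        else:
            fn, taskname = tid.rsplit(":", 1)
            mc = ""
        return (mc, fn, taskname)

    def build_tid(mc, fn, taskname):
        if mc:
            return "multiconfig:" + mc + ":" + fn + ":" + taskname
        return fn + ":" + taskname

    def taskfn_fromtid(tid):
        (mc, fn, taskname) = split_tid(tid)
        return ("multiconfig:" + mc + ":" + fn) if mc else fn

    # Example tids (paths and mc name are made up):
    plain = "/meta/recipes/foo_1.0.bb:do_compile"
    multi = "multiconfig:musl:/meta/recipes/foo_1.0.bb:do_compile"

    assert split_tid(plain) == ("", "/meta/recipes/foo_1.0.bb", "do_compile")
    assert split_tid(multi) == ("musl", "/meta/recipes/foo_1.0.bb", "do_compile")
    assert build_tid(*split_tid(multi)) == multi
    assert taskfn_fromtid(multi) == "multiconfig:musl:/meta/recipes/foo_1.0.bb"
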
@@ -110,9 +135,9 @@ class RunQueueScheduler(object): | |||
110 | self.buildable = [] | 135 | self.buildable = [] |
111 | self.stamps = {} | 136 | self.stamps = {} |
112 | for tid in self.rqdata.runtaskentries: | 137 | for tid in self.rqdata.runtaskentries: |
113 | fn = fn_from_tid(tid) | 138 | (mc, fn, taskname) = split_tid(tid) |
114 | taskname = taskname_from_tid(tid) | 139 | taskfn = taskfn_fromtid(tid) |
115 | self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn) | 140 | self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) |
116 | if tid in self.rq.runq_buildable: | 141 | if tid in self.rq.runq_buildable: |
117 | self.buildable.append(tid) | 142 | self.buildable.append(tid) |
118 | 143 | ||
@@ -230,9 +255,9 @@ class RunQueueData: | |||
230 | """ | 255 | """ |
231 | BitBake Run Queue implementation | 256 | BitBake Run Queue implementation |
232 | """ | 257 | """ |
233 | def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets): | 258 | def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets): |
234 | self.cooker = cooker | 259 | self.cooker = cooker |
235 | self.dataCache = dataCache | 260 | self.dataCaches = dataCaches |
236 | self.taskData = taskData | 261 | self.taskData = taskData |
237 | self.targets = targets | 262 | self.targets = targets |
238 | self.rq = rq | 263 | self.rq = rq |
@@ -264,8 +289,8 @@ class RunQueueData: | |||
264 | return tid + task_name_suffix | 289 | return tid + task_name_suffix |
265 | 290 | ||
266 | def get_short_user_idstring(self, task, task_name_suffix = ""): | 291 | def get_short_user_idstring(self, task, task_name_suffix = ""): |
267 | fn = fn_from_tid(task) | 292 | (mc, fn, taskname) = split_tid(task) |
268 | pn = self.dataCache.pkg_fn[fn] | 293 | pn = self.dataCaches[mc].pkg_fn[fn] |
269 | taskname = taskname_from_tid(task) + task_name_suffix | 294 | taskname = taskname_from_tid(task) + task_name_suffix |
270 | return "%s:%s" % (pn, taskname) | 295 | return "%s:%s" % (pn, taskname) |
271 | 296 | ||
@@ -429,7 +454,12 @@ class RunQueueData: | |||
429 | 454 | ||
430 | taskData = self.taskData | 455 | taskData = self.taskData |
431 | 456 | ||
432 | if len(taskData.taskentries) == 0: | 457 | found = False |
458 | for mc in self.taskData: | ||
459 | if len(taskData[mc].taskentries) > 0: | ||
460 | found = True | ||
461 | break | ||
462 | if not found: | ||
433 | # Nothing to do | 463 | # Nothing to do |
434 | return 0 | 464 | return 0 |
435 | 465 | ||
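
The "nothing to do" check above now has to look across every multiconfig's TaskData before bailing out. A tiny equivalent written with any() over plain stand-in objects; FakeTaskData and the tids are invented for illustration.

    # taskData is now a dict keyed by multiconfig name ("" = default config).
    class FakeTaskData:              # stand-in for the real TaskData objects
        def __init__(self, taskentries):
            self.taskentries = taskentries

    taskData = {
        "": FakeTaskData({"/meta/foo_1.0.bb:do_build": object()}),
        "musl": FakeTaskData({}),
    }

    # Same test as the new "found" loop, expressed with any():
    found = any(len(td.taskentries) > 0 for td in taskData.values())
    if not found:
        print("Nothing to do")
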
@@ -447,55 +477,60 @@ class RunQueueData: | |||
447 | # process is repeated for each type of dependency (tdepends, deptask, | 477 | # process is repeated for each type of dependency (tdepends, deptask, |
448 | # rdeptast, recrdeptask, idepends). | 478 | # rdeptast, recrdeptask, idepends). |
449 | 479 | ||
450 | def add_build_dependencies(depids, tasknames, depends): | 480 | def add_build_dependencies(depids, tasknames, depends, mc): |
451 | for depname in depids: | 481 | for depname in depids: |
452 | # Won't be in build_targets if ASSUME_PROVIDED | 482 | # Won't be in build_targets if ASSUME_PROVIDED |
453 | if depname not in taskData.build_targets or not taskData.build_targets[depname]: | 483 | if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]: |
454 | continue | 484 | continue |
455 | depdata = taskData.build_targets[depname][0] | 485 | depdata = taskData[mc].build_targets[depname][0] |
456 | if depdata is None: | 486 | if depdata is None: |
457 | continue | 487 | continue |
458 | for taskname in tasknames: | 488 | for taskname in tasknames: |
459 | t = depdata + ":" + taskname | 489 | t = depdata + ":" + taskname |
460 | if t in taskData.taskentries: | 490 | if t in taskData[mc].taskentries: |
461 | depends.add(t) | 491 | depends.add(t) |
462 | 492 | ||
463 | def add_runtime_dependencies(depids, tasknames, depends): | 493 | def add_runtime_dependencies(depids, tasknames, depends, mc): |
464 | for depname in depids: | 494 | for depname in depids: |
465 | if depname not in taskData.run_targets or not taskData.run_targets[depname]: | 495 | if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]: |
466 | continue | 496 | continue |
467 | depdata = taskData.run_targets[depname][0] | 497 | depdata = taskData[mc].run_targets[depname][0] |
468 | if depdata is None: | 498 | if depdata is None: |
469 | continue | 499 | continue |
470 | for taskname in tasknames: | 500 | for taskname in tasknames: |
471 | t = depdata + ":" + taskname | 501 | t = depdata + ":" + taskname |
472 | if t in taskData.taskentries: | 502 | if t in taskData[mc].taskentries: |
473 | depends.add(t) | 503 | depends.add(t) |
474 | 504 | ||
475 | def add_resolved_dependencies(fn, tasknames, depends): | 505 | def add_resolved_dependencies(mc, fn, tasknames, depends): |
476 | for taskname in tasknames: | 506 | for taskname in tasknames: |
477 | tid = fn + ":" + taskname | 507 | tid = build_tid(mc, fn, taskname) |
478 | if tid in self.runtaskentries: | 508 | if tid in self.runtaskentries: |
479 | depends.add(tid) | 509 | depends.add(tid) |
480 | 510 | ||
481 | for tid in taskData.taskentries: | 511 | for mc in taskData: |
512 | for tid in taskData[mc].taskentries: | ||
482 | 513 | ||
483 | fn = fn_from_tid(tid) | 514 | (mc, fn, taskname) = split_tid(tid) |
484 | taskname = taskname_from_tid(tid) | 515 | #runtid = build_tid(mc, fn, taskname) |
516 | taskfn = taskfn_fromtid(tid) | ||
485 | 517 | ||
486 | depends = set() | 518 | #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname) |
487 | task_deps = self.dataCache.task_deps[fn] | ||
488 | 519 | ||
489 | self.runtaskentries[tid] = RunTaskEntry() | 520 | depends = set() |
521 | task_deps = self.dataCaches[mc].task_deps[taskfn] | ||
490 | 522 | ||
491 | #logger.debug(2, "Processing %s:%s", fn, taskname) | 523 | self.runtaskentries[tid] = RunTaskEntry() |
492 | 524 | ||
493 | if fn not in taskData.failed_fns: | 525 | if fn in taskData[mc].failed_fns: |
526 | continue | ||
494 | 527 | ||
495 | # Resolve task internal dependencies | 528 | # Resolve task internal dependencies |
496 | # | 529 | # |
497 | # e.g. addtask before X after Y | 530 | # e.g. addtask before X after Y |
498 | depends.update(taskData.taskentries[tid].tdepends) | 531 | for t in taskData[mc].taskentries[tid].tdepends: |
532 | (_, depfn, deptaskname) = split_tid(t) | ||
533 | depends.add(build_tid(mc, depfn, deptaskname)) | ||
499 | 534 | ||
500 | # Resolve 'deptask' dependencies | 535 | # Resolve 'deptask' dependencies |
501 | # | 536 | # |
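
The dependency helpers now carry the multiconfig name so every lookup goes through the matching TaskData instance. A reduced, runnable sketch of the add_build_dependencies() pattern using plain dictionaries; the zlib recipe and task names are invented.

    # Minimal model: per-multiconfig build_targets and taskentries tables.
    taskData = {
        "": {
            "build_targets": {"zlib": ["/meta/zlib_1.2.bb"]},
            "taskentries": {"/meta/zlib_1.2.bb:do_populate_sysroot"},
        },
    }

    def add_build_dependencies(depids, tasknames, depends, mc):
        for depname in depids:
            # Skipped when ASSUME_PROVIDED removed it from build_targets.
            providers = taskData[mc]["build_targets"].get(depname)
            if not providers or providers[0] is None:
                continue
            for taskname in tasknames:
                t = providers[0] + ":" + taskname
                if t in taskData[mc]["taskentries"]:
                    depends.add(t)

    depends = set()
    add_build_dependencies(["zlib"], ["do_populate_sysroot"], depends, "")
    print(depends)   # {'/meta/zlib_1.2.bb:do_populate_sysroot'}
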
@@ -503,7 +538,7 @@ class RunQueueData: | |||
503 | # (makes sure sometask runs after someothertask of all DEPENDS) | 538 | # (makes sure sometask runs after someothertask of all DEPENDS) |
504 | if 'deptask' in task_deps and taskname in task_deps['deptask']: | 539 | if 'deptask' in task_deps and taskname in task_deps['deptask']: |
505 | tasknames = task_deps['deptask'][taskname].split() | 540 | tasknames = task_deps['deptask'][taskname].split() |
506 | add_build_dependencies(taskData.depids[fn], tasknames, depends) | 541 | add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) |
507 | 542 | ||
508 | # Resolve 'rdeptask' dependencies | 543 | # Resolve 'rdeptask' dependencies |
509 | # | 544 | # |
@@ -511,31 +546,31 @@ class RunQueueData: | |||
511 | # (makes sure sometask runs after someothertask of all RDEPENDS) | 546 | # (makes sure sometask runs after someothertask of all RDEPENDS) |
512 | if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']: | 547 | if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']: |
513 | tasknames = task_deps['rdeptask'][taskname].split() | 548 | tasknames = task_deps['rdeptask'][taskname].split() |
514 | add_runtime_dependencies(taskData.rdepids[fn], tasknames, depends) | 549 | add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) |
515 | 550 | ||
516 | # Resolve inter-task dependencies | 551 | # Resolve inter-task dependencies |
517 | # | 552 | # |
518 | # e.g. do_sometask[depends] = "targetname:do_someothertask" | 553 | # e.g. do_sometask[depends] = "targetname:do_someothertask" |
519 | # (makes sure sometask runs after targetname's someothertask) | 554 | # (makes sure sometask runs after targetname's someothertask) |
520 | idepends = taskData.taskentries[tid].idepends | 555 | idepends = taskData[mc].taskentries[tid].idepends |
521 | for (depname, idependtask) in idepends: | 556 | for (depname, idependtask) in idepends: |
522 | if depname in taskData.build_targets and taskData.build_targets[depname] and not depname in taskData.failed_deps: | 557 | if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps: |
523 | # Won't be in build_targets if ASSUME_PROVIDED | 558 | # Won't be in build_targets if ASSUME_PROVIDED |
524 | depdata = taskData.build_targets[depname][0] | 559 | depdata = taskData[mc].build_targets[depname][0] |
525 | if depdata is not None: | 560 | if depdata is not None: |
526 | t = depdata + ":" + idependtask | 561 | t = depdata + ":" + idependtask |
527 | depends.add(t) | 562 | depends.add(t) |
528 | if t not in taskData.taskentries: | 563 | if t not in taskData[mc].taskentries: |
529 | bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) | 564 | bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) |
530 | irdepends = taskData.taskentries[tid].irdepends | 565 | irdepends = taskData[mc].taskentries[tid].irdepends |
531 | for (depname, idependtask) in irdepends: | 566 | for (depname, idependtask) in irdepends: |
532 | if depname in taskData.run_targets: | 567 | if depname in taskData[mc].run_targets: |
533 | # Won't be in run_targets if ASSUME_PROVIDED | 568 | # Won't be in run_targets if ASSUME_PROVIDED |
534 | depdata = taskData.run_targets[depname][0] | 569 | depdata = taskData[mc].run_targets[depname][0] |
535 | if depdata is not None: | 570 | if depdata is not None: |
536 | t = depdata + ":" + idependtask | 571 | t = depdata + ":" + idependtask |
537 | depends.add(t) | 572 | depends.add(t) |
538 | if t not in taskData.taskentries: | 573 | if t not in taskData[mc].taskentries: |
539 | bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) | 574 | bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata)) |
540 | 575 | ||
541 | # Resolve recursive 'recrdeptask' dependencies (Part A) | 576 | # Resolve recursive 'recrdeptask' dependencies (Part A) |
@@ -546,18 +581,20 @@ class RunQueueData: | |||
546 | if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']: | 581 | if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']: |
547 | tasknames = task_deps['recrdeptask'][taskname].split() | 582 | tasknames = task_deps['recrdeptask'][taskname].split() |
548 | recursivetasks[tid] = tasknames | 583 | recursivetasks[tid] = tasknames |
549 | add_build_dependencies(taskData.depids[fn], tasknames, depends) | 584 | add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc) |
550 | add_runtime_dependencies(taskData.rdepids[fn], tasknames, depends) | 585 | add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc) |
551 | if taskname in tasknames: | 586 | if taskname in tasknames: |
552 | recursivetasksselfref.add(tid) | 587 | recursivetasksselfref.add(tid) |
553 | 588 | ||
554 | if 'recideptask' in task_deps and taskname in task_deps['recideptask']: | 589 | if 'recideptask' in task_deps and taskname in task_deps['recideptask']: |
555 | recursiveitasks[tid] = [] | 590 | recursiveitasks[tid] = [] |
556 | for t in task_deps['recideptask'][taskname].split(): | 591 | for t in task_deps['recideptask'][taskname].split(): |
557 | newdep = fn + ":" + t | 592 | newdep = build_tid(mc, fn, t) |
558 | recursiveitasks[tid].append(newdep) | 593 | recursiveitasks[tid].append(newdep) |
559 | 594 | ||
560 | self.runtaskentries[tid].depends = depends | 595 | self.runtaskentries[tid].depends = depends |
596 | |||
597 | #self.dump_data() | ||
561 | 598 | ||
562 | # Resolve recursive 'recrdeptask' dependencies (Part B) | 599 | # Resolve recursive 'recrdeptask' dependencies (Part B) |
563 | # | 600 | # |
@@ -574,7 +611,8 @@ class RunQueueData: | |||
574 | 611 | ||
575 | def generate_recdeps(t): | 612 | def generate_recdeps(t): |
576 | newdeps = set() | 613 | newdeps = set() |
577 | add_resolved_dependencies(fn_from_tid(t), tasknames, newdeps) | 614 | (mc, fn, taskname) = split_tid(t) |
615 | add_resolved_dependencies(mc, fn, tasknames, newdeps) | ||
578 | extradeps[tid].update(newdeps) | 616 | extradeps[tid].update(newdeps) |
579 | seendeps.add(t) | 617 | seendeps.add(t) |
580 | newdeps.add(t) | 618 | newdeps.add(t) |
@@ -606,6 +644,8 @@ class RunQueueData: | |||
606 | 644 | ||
607 | self.init_progress_reporter.next_stage() | 645 | self.init_progress_reporter.next_stage() |
608 | 646 | ||
647 | #self.dump_data() | ||
648 | |||
609 | # Step B - Mark all active tasks | 649 | # Step B - Mark all active tasks |
610 | # | 650 | # |
611 | # Start with the tasks we were asked to run and mark all dependencies | 651 | # Start with the tasks we were asked to run and mark all dependencies |
@@ -629,31 +669,30 @@ class RunQueueData: | |||
629 | for depend in depends: | 669 | for depend in depends: |
630 | mark_active(depend, depth+1) | 670 | mark_active(depend, depth+1) |
631 | 671 | ||
632 | self.target_pairs = [] | 672 | self.target_tids = [] |
633 | for target in self.targets: | 673 | for (mc, target, task, fn) in self.targets: |
634 | if target[0] not in taskData.build_targets or not taskData.build_targets[target[0]]: | 674 | |
675 | if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]: | ||
635 | continue | 676 | continue |
636 | 677 | ||
637 | if target[0] in taskData.failed_deps: | 678 | if target in taskData[mc].failed_deps: |
638 | continue | 679 | continue |
639 | 680 | ||
640 | fn = taskData.build_targets[target[0]][0] | ||
641 | task = target[1] | ||
642 | parents = False | 681 | parents = False |
643 | if task.endswith('-'): | 682 | if task.endswith('-'): |
644 | parents = True | 683 | parents = True |
645 | task = task[:-1] | 684 | task = task[:-1] |
646 | 685 | ||
647 | self.target_pairs.append((fn, task)) | 686 | if fn in taskData[mc].failed_fns: |
648 | |||
649 | if fn in taskData.failed_fns: | ||
650 | continue | 687 | continue |
651 | 688 | ||
689 | # fn already has mc prefix | ||
652 | tid = fn + ":" + task | 690 | tid = fn + ":" + task |
653 | if tid not in taskData.taskentries: | 691 | self.target_tids.append(tid) |
692 | if tid not in taskData[mc].taskentries: | ||
654 | import difflib | 693 | import difflib |
655 | tasks = [] | 694 | tasks = [] |
656 | for x in taskData.taskentries: | 695 | for x in taskData[mc].taskentries: |
657 | if x.startswith(fn + ":"): | 696 | if x.startswith(fn + ":"): |
658 | tasks.append(taskname_from_tid(x)) | 697 | tasks.append(taskname_from_tid(x)) |
659 | close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7) | 698 | close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7) |
@@ -661,7 +700,7 @@ class RunQueueData: | |||
661 | extra = ". Close matches:\n %s" % "\n ".join(close_matches) | 700 | extra = ". Close matches:\n %s" % "\n ".join(close_matches) |
662 | else: | 701 | else: |
663 | extra = "" | 702 | extra = "" |
664 | bb.msg.fatal("RunQueue", "Task %s does not exist for target %s%s" % (task, target[0], extra)) | 703 | bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra)) |
665 | 704 | ||
666 | # For tasks called "XXXX-", ony run their dependencies | 705 | # For tasks called "XXXX-", ony run their dependencies |
667 | if parents: | 706 | if parents: |
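
self.targets now arrives as (mc, providee, taskname, fn) tuples and the code records whole task IDs in self.target_tids instead of (fn, task) pairs. A compressed sketch of that loop follows; the SimpleNamespace objects stand in for TaskData, and the difflib "close matches" error path is left out.

    from types import SimpleNamespace

    def collect_target_tids(targets, taskdata):
        # targets: (mc, providee, taskname, fn) tuples; fn already carries any
        # "multiconfig:<mc>:" prefix, so tid construction is plain string glue.
        target_tids = []
        for (mc, target, task, fn) in targets:
            td = taskdata[mc]
            if target not in td.build_targets or not td.build_targets[target]:
                continue
            if target in td.failed_deps or fn in td.failed_fns:
                continue
            if task.endswith('-'):     # "do_foo-" means: only the dependencies
                task = task[:-1]
            target_tids.append(fn + ":" + task)
        return target_tids

    taskdata = {"": SimpleNamespace(build_targets={"foo": ["/meta/foo_1.0.bb"]},
                                    failed_deps=[], failed_fns=[])}
    print(collect_target_tids([("", "foo", "do_build", "/meta/foo_1.0.bb")], taskdata))
    # ['/meta/foo_1.0.bb:do_build']
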
@@ -690,7 +729,7 @@ class RunQueueData: | |||
690 | 729 | ||
691 | # Check to make sure we still have tasks to run | 730 | # Check to make sure we still have tasks to run |
692 | if len(self.runtaskentries) == 0: | 731 | if len(self.runtaskentries) == 0: |
693 | if not taskData.abort: | 732 | if not taskData[''].abort: |
694 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") | 733 | bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") |
695 | else: | 734 | else: |
696 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") | 735 | bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.") |
@@ -717,7 +756,6 @@ class RunQueueData: | |||
717 | endpoints.append(tid) | 756 | endpoints.append(tid) |
718 | for dep in revdeps: | 757 | for dep in revdeps: |
719 | if dep in self.runtaskentries[tid].depends: | 758 | if dep in self.runtaskentries[tid].depends: |
720 | #self.dump_data(taskData) | ||
721 | bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep)) | 759 | bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep)) |
722 | 760 | ||
723 | 761 | ||
@@ -732,24 +770,31 @@ class RunQueueData: | |||
732 | self.init_progress_reporter.next_stage() | 770 | self.init_progress_reporter.next_stage() |
733 | 771 | ||
734 | # Sanity Check - Check for multiple tasks building the same provider | 772 | # Sanity Check - Check for multiple tasks building the same provider |
735 | prov_list = {} | 773 | for mc in self.dataCaches: |
736 | seen_fn = [] | 774 | prov_list = {} |
737 | for tid in self.runtaskentries: | 775 | seen_fn = [] |
738 | fn = fn_from_tid(tid) | 776 | for tid in self.runtaskentries: |
739 | if fn in seen_fn: | 777 | (tidmc, fn, taskname) = split_tid(tid) |
740 | continue | 778 | taskfn = taskfn_fromtid(tid) |
741 | seen_fn.append(fn) | 779 | if taskfn in seen_fn: |
742 | for prov in self.dataCache.fn_provides[fn]: | 780 | continue |
743 | if prov not in prov_list: | 781 | if mc != tidmc: |
744 | prov_list[prov] = [fn] | 782 | continue |
745 | elif fn not in prov_list[prov]: | 783 | seen_fn.append(taskfn) |
746 | prov_list[prov].append(fn) | 784 | for prov in self.dataCaches[mc].fn_provides[taskfn]: |
747 | for prov in prov_list: | 785 | if prov not in prov_list: |
748 | if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist: | 786 | prov_list[prov] = [taskfn] |
787 | elif taskfn not in prov_list[prov]: | ||
788 | prov_list[prov].append(taskfn) | ||
789 | for prov in prov_list: | ||
790 | if len(prov_list[prov]) < 2: | ||
791 | continue | ||
792 | if prov in self.multi_provider_whitelist: | ||
793 | continue | ||
749 | seen_pn = [] | 794 | seen_pn = [] |
750 | # If two versions of the same PN are being built its fatal, we don't support it. | 795 | # If two versions of the same PN are being built its fatal, we don't support it. |
751 | for fn in prov_list[prov]: | 796 | for fn in prov_list[prov]: |
752 | pn = self.dataCache.pkg_fn[fn] | 797 | pn = self.dataCaches[mc].pkg_fn[fn] |
753 | if pn not in seen_pn: | 798 | if pn not in seen_pn: |
754 | seen_pn.append(pn) | 799 | seen_pn.append(pn) |
755 | else: | 800 | else: |
@@ -790,16 +835,16 @@ class RunQueueData: | |||
790 | commonprovs = None | 835 | commonprovs = None |
791 | commonrprovs = None | 836 | commonrprovs = None |
792 | for provfn in prov_list[prov]: | 837 | for provfn in prov_list[prov]: |
793 | provides = set(self.dataCache.fn_provides[provfn]) | 838 | provides = set(self.dataCaches[mc].fn_provides[provfn]) |
794 | rprovides = set() | 839 | rprovides = set() |
795 | for rprovide in self.dataCache.rproviders: | 840 | for rprovide in self.dataCaches[mc].rproviders: |
796 | if provfn in self.dataCache.rproviders[rprovide]: | 841 | if provfn in self.dataCaches[mc].rproviders[rprovide]: |
797 | rprovides.add(rprovide) | 842 | rprovides.add(rprovide) |
798 | for package in self.dataCache.packages: | 843 | for package in self.dataCaches[mc].packages: |
799 | if provfn in self.dataCache.packages[package]: | 844 | if provfn in self.dataCaches[mc].packages[package]: |
800 | rprovides.add(package) | 845 | rprovides.add(package) |
801 | for package in self.dataCache.packages_dynamic: | 846 | for package in self.dataCaches[mc].packages_dynamic: |
802 | if provfn in self.dataCache.packages_dynamic[package]: | 847 | if provfn in self.dataCaches[mc].packages_dynamic[package]: |
803 | rprovides.add(package) | 848 | rprovides.add(package) |
804 | if not commonprovs: | 849 | if not commonprovs: |
805 | commonprovs = set(provides) | 850 | commonprovs = set(provides) |
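
The multiple-provider sanity check now runs once per multiconfig and keys its bookkeeping on taskfn, which carries the multiconfig prefix. A toy version of the grouping step; fn_provides mirrors dataCaches[mc].fn_provides, and the whitelist and version handling are left out.

    from collections import defaultdict

    def duplicate_providers(runtaskentries, fn_provides, mc):
        # runtaskentries: iterable of tids; fn_provides: taskfn -> [PROVIDES names].
        # Returns providers declared by more than one recipe within this mc.
        prov_list = defaultdict(list)
        seen_fn = set()
        for tid in runtaskentries:
            if tid.startswith('multiconfig:'):
                _, tidmc, rest = tid.split(':', 2)
                taskfn = 'multiconfig:' + tidmc + ':' + rest.rsplit(':', 1)[0]
            else:
                tidmc = ''
                taskfn = tid.rsplit(':', 1)[0]
            if tidmc != mc or taskfn in seen_fn:
                continue
            seen_fn.add(taskfn)
            for prov in fn_provides.get(taskfn, []):
                if taskfn not in prov_list[prov]:
                    prov_list[prov].append(taskfn)
        return {p: fns for p, fns in prov_list.items() if len(fns) > 1}
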
@@ -825,13 +870,14 @@ class RunQueueData: | |||
825 | self.init_progress_reporter.next_stage() | 870 | self.init_progress_reporter.next_stage() |
826 | 871 | ||
827 | # Create a whitelist usable by the stamp checks | 872 | # Create a whitelist usable by the stamp checks |
828 | stampfnwhitelist = [] | 873 | self.stampfnwhitelist = {} |
829 | for entry in self.stampwhitelist.split(): | 874 | for mc in self.taskData: |
830 | if entry not in self.taskData.build_targets: | 875 | self.stampfnwhitelist[mc] = [] |
831 | continue | 876 | for entry in self.stampwhitelist.split(): |
832 | fn = self.taskData.build_targets[entry][0] | 877 | if entry not in self.taskData[mc].build_targets: |
833 | stampfnwhitelist.append(fn) | 878 | continue |
834 | self.stampfnwhitelist = stampfnwhitelist | 879 | fn = self.taskData.build_targets[entry][0] |
880 | self.stampfnwhitelist[mc].append(fn) | ||
835 | 881 | ||
836 | self.init_progress_reporter.next_stage() | 882 | self.init_progress_reporter.next_stage() |
837 | 883 | ||
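
The stamp whitelist becomes a dict keyed by multiconfig, built from the configured whitelist string and each configuration's build_targets. A compact sketch with invented recipes:

    def build_stampfnwhitelist(stampwhitelist, build_targets_by_mc):
        # stampwhitelist: space-separated provider names;
        # build_targets_by_mc: mc -> {provider: [recipe filenames]}.
        result = {}
        for mc, build_targets in build_targets_by_mc.items():
            result[mc] = []
            for entry in stampwhitelist.split():
                if entry not in build_targets:
                    continue
                result[mc].append(build_targets[entry][0])
        return result

    print(build_stampfnwhitelist(
        "zlib",
        {"": {"zlib": ["/meta/zlib_1.2.bb"]}, "musl": {}},
    ))   # {'': ['/meta/zlib_1.2.bb'], 'musl': []}
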
@@ -839,16 +885,16 @@ class RunQueueData: | |||
839 | self.runq_setscene_tids = [] | 885 | self.runq_setscene_tids = [] |
840 | if not self.cooker.configuration.nosetscene: | 886 | if not self.cooker.configuration.nosetscene: |
841 | for tid in self.runtaskentries: | 887 | for tid in self.runtaskentries: |
842 | setscenetid = tid + "_setscene" | 888 | (mc, fn, taskname) = split_tid(tid) |
843 | if setscenetid not in taskData.taskentries: | 889 | setscenetid = fn + ":" + taskname + "_setscene" |
890 | if setscenetid not in taskData[mc].taskentries: | ||
844 | continue | 891 | continue |
845 | task = self.runtaskentries[tid].task | ||
846 | self.runq_setscene_tids.append(tid) | 892 | self.runq_setscene_tids.append(tid) |
847 | 893 | ||
848 | def invalidate_task(fn, taskname, error_nostamp): | 894 | def invalidate_task(tid, error_nostamp): |
849 | taskdep = self.dataCache.task_deps[fn] | 895 | (mc, fn, taskname) = split_tid(tid) |
850 | tid = fn + ":" + taskname | 896 | taskdep = self.dataCaches[mc].task_deps[fn] |
851 | if tid not in taskData.taskentries: | 897 | if fn + ":" + taskname not in taskData[mc].taskentries: |
852 | logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname) | 898 | logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname) |
853 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: | 899 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: |
854 | if error_nostamp: | 900 | if error_nostamp: |
@@ -857,33 +903,35 @@ class RunQueueData: | |||
857 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) | 903 | bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname) |
858 | else: | 904 | else: |
859 | logger.verbose("Invalidate task %s, %s", taskname, fn) | 905 | logger.verbose("Invalidate task %s, %s", taskname, fn) |
860 | bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn) | 906 | bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn) |
861 | 907 | ||
862 | self.init_progress_reporter.next_stage() | 908 | self.init_progress_reporter.next_stage() |
863 | 909 | ||
864 | # Invalidate task if force mode active | 910 | # Invalidate task if force mode active |
865 | if self.cooker.configuration.force: | 911 | if self.cooker.configuration.force: |
866 | for (fn, target) in self.target_pairs: | 912 | for tid in self.target_tids: |
867 | invalidate_task(fn, target, False) | 913 | invalidate_task(tid, False) |
868 | 914 | ||
869 | # Invalidate task if invalidate mode active | 915 | # Invalidate task if invalidate mode active |
870 | if self.cooker.configuration.invalidate_stamp: | 916 | if self.cooker.configuration.invalidate_stamp: |
871 | for (fn, target) in self.target_pairs: | 917 | for tid in self.target_tids: |
918 | fn = fn_from_tid(tid) | ||
872 | for st in self.cooker.configuration.invalidate_stamp.split(','): | 919 | for st in self.cooker.configuration.invalidate_stamp.split(','): |
873 | if not st.startswith("do_"): | 920 | if not st.startswith("do_"): |
874 | st = "do_%s" % st | 921 | st = "do_%s" % st |
875 | invalidate_task(fn, st, True) | 922 | invalidate_task(fn + ":" + st, True) |
876 | 923 | ||
877 | self.init_progress_reporter.next_stage() | 924 | self.init_progress_reporter.next_stage() |
878 | 925 | ||
879 | # Create and print to the logs a virtual/xxxx -> PN (fn) table | 926 | # Create and print to the logs a virtual/xxxx -> PN (fn) table |
880 | virtmap = taskData.get_providermap(prefix="virtual/") | 927 | for mc in taskData: |
881 | virtpnmap = {} | 928 | virtmap = taskData[mc].get_providermap(prefix="virtual/") |
882 | for v in virtmap: | 929 | virtpnmap = {} |
883 | virtpnmap[v] = self.dataCache.pkg_fn[virtmap[v]] | 930 | for v in virtmap: |
884 | bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v])) | 931 | virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]] |
885 | if hasattr(bb.parse.siggen, "tasks_resolved"): | 932 | bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v])) |
886 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCache) | 933 | if hasattr(bb.parse.siggen, "tasks_resolved"): |
934 | bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc]) | ||
887 | 935 | ||
888 | self.init_progress_reporter.next_stage() | 936 | self.init_progress_reporter.next_stage() |
889 | 937 | ||
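
invalidate_task() now takes a whole task ID and derives (mc, fn, taskname) itself, which lets the --force and --invalidate-stamp paths feed it entries from target_tids directly. A reduced sketch under that assumption; task_deps_by_mc is a stand-in for self.dataCaches[mc].task_deps and the final siggen call is stubbed out.

    def invalidate_task(tid, error_nostamp, task_deps_by_mc):
        # Derive the pieces from the tid instead of receiving (fn, taskname).
        if tid.startswith('multiconfig:'):
            _, mc, rest = tid.split(':', 2)
            fn, taskname = rest.rsplit(':', 1)
        else:
            mc = ''
            fn, taskname = tid.rsplit(':', 1)
        taskdep = task_deps_by_mc[mc][fn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            if error_nostamp:
                raise RuntimeError("task is marked nostamp")  # bb.fatal() in BitBake
            print("cannot invalidate nostamp task", taskname)
            return
        # The real code ends with:
        #     bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
        print("invalidating", mc or "<default>", fn, taskname)

    # --force now iterates task IDs directly:
    # for tid in self.target_tids:
    #     invalidate_task(tid, False, task_deps_by_mc)
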
@@ -898,13 +946,17 @@ class RunQueueData: | |||
898 | procdep = [] | 946 | procdep = [] |
899 | for dep in self.runtaskentries[tid].depends: | 947 | for dep in self.runtaskentries[tid].depends: |
900 | procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep)) | 948 | procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep)) |
901 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(fn_from_tid(tid), taskname_from_tid(tid), procdep, self.dataCache) | 949 | (mc, fn, taskname) = split_tid(tid) |
950 | taskfn = taskfn_fromtid(tid) | ||
951 | self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc]) | ||
902 | task = self.runtaskentries[tid].task | 952 | task = self.runtaskentries[tid].task |
903 | 953 | ||
904 | bb.parse.siggen.writeout_file_checksum_cache() | 954 | bb.parse.siggen.writeout_file_checksum_cache() |
955 | |||
956 | #self.dump_data() | ||
905 | return len(self.runtaskentries) | 957 | return len(self.runtaskentries) |
906 | 958 | ||
907 | def dump_data(self, taskQueue): | 959 | def dump_data(self): |
908 | """ | 960 | """ |
909 | Dump some debug information on the internal data structures | 961 | Dump some debug information on the internal data structures |
910 | """ | 962 | """ |
@@ -915,24 +967,17 @@ class RunQueueData: | |||
915 | self.runtaskentries[tid].depends, | 967 | self.runtaskentries[tid].depends, |
916 | self.runtaskentries[tid].revdeps) | 968 | self.runtaskentries[tid].revdeps) |
917 | 969 | ||
918 | logger.debug(3, "sorted_tasks:") | ||
919 | for tid in self.prio_map: | ||
920 | logger.debug(3, " %s: %s Deps %s RevDeps %s", tid, | ||
921 | self.runtaskentries[tid].weight, | ||
922 | self.runtaskentries[tid].depends, | ||
923 | self.runtaskentries[tid].revdeps) | ||
924 | |||
925 | class RunQueueWorker(): | 970 | class RunQueueWorker(): |
926 | def __init__(self, process, pipe): | 971 | def __init__(self, process, pipe): |
927 | self.process = process | 972 | self.process = process |
928 | self.pipe = pipe | 973 | self.pipe = pipe |
929 | 974 | ||
930 | class RunQueue: | 975 | class RunQueue: |
931 | def __init__(self, cooker, cfgData, dataCache, taskData, targets): | 976 | def __init__(self, cooker, cfgData, dataCaches, taskData, targets): |
932 | 977 | ||
933 | self.cooker = cooker | 978 | self.cooker = cooker |
934 | self.cfgData = cfgData | 979 | self.cfgData = cfgData |
935 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets) | 980 | self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets) |
936 | 981 | ||
937 | self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile" | 982 | self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile" |
938 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None | 983 | self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None |
@@ -948,7 +993,7 @@ class RunQueue: | |||
948 | self.worker = {} | 993 | self.worker = {} |
949 | self.fakeworker = {} | 994 | self.fakeworker = {} |
950 | 995 | ||
951 | def _start_worker(self, fakeroot = False, rqexec = None): | 996 | def _start_worker(self, mc, fakeroot = False, rqexec = None): |
952 | logger.debug(1, "Starting bitbake-worker") | 997 | logger.debug(1, "Starting bitbake-worker") |
953 | magic = "decafbad" | 998 | magic = "decafbad" |
954 | if self.cooker.configuration.profile: | 999 | if self.cooker.configuration.profile: |
@@ -971,10 +1016,10 @@ class RunQueue: | |||
971 | runqhash[tid] = self.rqdata.runtaskentries[tid].hash | 1016 | runqhash[tid] = self.rqdata.runtaskentries[tid].hash |
972 | 1017 | ||
973 | workerdata = { | 1018 | workerdata = { |
974 | "taskdeps" : self.rqdata.dataCache.task_deps, | 1019 | "taskdeps" : self.rqdata.dataCaches[mc].task_deps, |
975 | "fakerootenv" : self.rqdata.dataCache.fakerootenv, | 1020 | "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv, |
976 | "fakerootdirs" : self.rqdata.dataCache.fakerootdirs, | 1021 | "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs, |
977 | "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv, | 1022 | "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv, |
978 | "sigdata" : bb.parse.siggen.get_taskdata(), | 1023 | "sigdata" : bb.parse.siggen.get_taskdata(), |
979 | "runq_hash" : runqhash, | 1024 | "runq_hash" : runqhash, |
980 | "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel, | 1025 | "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel, |
@@ -1014,11 +1059,13 @@ class RunQueue: | |||
1014 | if self.worker: | 1059 | if self.worker: |
1015 | self.teardown_workers() | 1060 | self.teardown_workers() |
1016 | self.teardown = False | 1061 | self.teardown = False |
1017 | self.worker[''] = self._start_worker() | 1062 | for mc in self.rqdata.dataCaches: |
1063 | self.worker[mc] = self._start_worker(mc) | ||
1018 | 1064 | ||
1019 | def start_fakeworker(self, rqexec): | 1065 | def start_fakeworker(self, rqexec): |
1020 | if not self.fakeworker: | 1066 | if not self.fakeworker: |
1021 | self.fakeworker[''] = self._start_worker(True, rqexec) | 1067 | for mc in self.rqdata.dataCaches: |
1068 | self.fakeworker[mc] = self._start_worker(mc, True, rqexec) | ||
1022 | 1069 | ||
1023 | def teardown_workers(self): | 1070 | def teardown_workers(self): |
1024 | self.teardown = True | 1071 | self.teardown = True |
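
Rather than one anonymous worker under the '' key, the run queue now starts a bitbake-worker per multiconfig and indexes both worker and fakeworker by mc. A cut-down illustration of that bookkeeping; the returned tuple stands in for the real worker handle.

    class RunQueueSketch:
        # One worker process per multiconfig, stored under its mc key
        # ("" = default configuration).
        def __init__(self, dataCaches):
            self.dataCaches = dataCaches     # dict: mc name -> data cache
            self.worker = {}
            self.fakeworker = {}

        def _start_worker(self, mc, fakeroot=False):
            # The real method spawns bitbake-worker and feeds it the per-mc
            # workerdata (task_deps, fakerootenv, ... from dataCaches[mc]).
            return ("worker-process", mc, fakeroot)

        def start_worker(self):
            for mc in self.dataCaches:
                self.worker[mc] = self._start_worker(mc)

        def start_fakeworker(self):
            if not self.fakeworker:
                for mc in self.dataCaches:
                    self.fakeworker[mc] = self._start_worker(mc, True)

    rq = RunQueueSketch({"": object(), "musl": object()})
    rq.start_worker()
    print(sorted(rq.worker))   # ['', 'musl']
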
@@ -1052,26 +1099,27 @@ class RunQueue: | |||
1052 | except: | 1099 | except: |
1053 | return None | 1100 | return None |
1054 | 1101 | ||
1102 | (mc, fn, tn) = split_tid(tid) | ||
1103 | taskfn = taskfn_fromtid(tid) | ||
1104 | if taskname is None: | ||
1105 | taskname = tn | ||
1106 | |||
1055 | if self.stamppolicy == "perfile": | 1107 | if self.stamppolicy == "perfile": |
1056 | fulldeptree = False | 1108 | fulldeptree = False |
1057 | else: | 1109 | else: |
1058 | fulldeptree = True | 1110 | fulldeptree = True |
1059 | stampwhitelist = [] | 1111 | stampwhitelist = [] |
1060 | if self.stamppolicy == "whitelist": | 1112 | if self.stamppolicy == "whitelist": |
1061 | stampwhitelist = self.rqdata.stampfnwhitelist | 1113 | stampwhitelist = self.rqdata.stampfnwhitelist[mc] |
1062 | 1114 | ||
1063 | fn = fn_from_tid(tid) | 1115 | stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) |
1064 | if taskname is None: | ||
1065 | taskname = taskname_from_tid(tid) | ||
1066 | |||
1067 | stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn) | ||
1068 | 1116 | ||
1069 | # If the stamp is missing, it's not current | 1117 | # If the stamp is missing, it's not current |
1070 | if not os.access(stampfile, os.F_OK): | 1118 | if not os.access(stampfile, os.F_OK): |
1071 | logger.debug(2, "Stampfile %s not available", stampfile) | 1119 | logger.debug(2, "Stampfile %s not available", stampfile) |
1072 | return False | 1120 | return False |
1073 | # If it's a 'nostamp' task, it's not current | 1121 | # If it's a 'nostamp' task, it's not current |
1074 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1122 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1075 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: | 1123 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: |
1076 | logger.debug(2, "%s.%s is nostamp\n", fn, taskname) | 1124 | logger.debug(2, "%s.%s is nostamp\n", fn, taskname) |
1077 | return False | 1125 | return False |
@@ -1086,10 +1134,10 @@ class RunQueue: | |||
1086 | t1 = get_timestamp(stampfile) | 1134 | t1 = get_timestamp(stampfile) |
1087 | for dep in self.rqdata.runtaskentries[tid].depends: | 1135 | for dep in self.rqdata.runtaskentries[tid].depends: |
1088 | if iscurrent: | 1136 | if iscurrent: |
1089 | fn2 = fn_from_tid(dep) | 1137 | (mc2, fn2, taskname2) = split_tid(dep) |
1090 | taskname2 = taskname_from_tid(dep) | 1138 | taskfn2 = taskfn_fromtid(dep) |
1091 | stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2) | 1139 | stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2) |
1092 | stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2) | 1140 | stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2) |
1093 | t2 = get_timestamp(stampfile2) | 1141 | t2 = get_timestamp(stampfile2) |
1094 | t3 = get_timestamp(stampfile3) | 1142 | t3 = get_timestamp(stampfile3) |
1095 | if t3 and not t2: | 1143 | if t3 and not t2: |
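
check_stamp_task() now splits the tid itself and resolves stamps through dataCaches[mc], so a dependency from another multiconfig (mc2) is checked against its own cache. A very reduced model of the timestamp comparison, reusing the split_tid() sketch from the first example; stampdir_for_mc and the plain path join stand in for bb.build.stampfile(), and the whitelist and fulldeptree handling are ignored.

    import os

    def stamp_is_current(tid, depends, stampdir_for_mc):
        mc, fn, taskname = split_tid(tid)
        stamp = os.path.join(stampdir_for_mc[mc],
                             os.path.basename(fn) + "." + taskname)
        if not os.access(stamp, os.F_OK):          # missing stamp -> not current
            return False
        t1 = os.stat(stamp).st_mtime
        for dep in depends:
            mc2, fn2, taskname2 = split_tid(dep)   # dependency's own multiconfig
            dep_stamp = os.path.join(stampdir_for_mc[mc2],
                                     os.path.basename(fn2) + "." + taskname2)
            if os.access(dep_stamp, os.F_OK) and os.stat(dep_stamp).st_mtime > t1:
                return False                       # a dependency ran more recently
        return True
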
@@ -1196,10 +1244,11 @@ class RunQueue: | |||
1196 | logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) | 1244 | logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped) |
1197 | 1245 | ||
1198 | if self.state is runQueueFailed: | 1246 | if self.state is runQueueFailed: |
1199 | if not self.rqdata.taskData.tryaltconfigs: | 1247 | if not self.rqdata.taskData[''].tryaltconfigs: |
1200 | raise bb.runqueue.TaskFailure(self.rqexe.failed_fns) | 1248 | raise bb.runqueue.TaskFailure(self.rqexe.failed_tids) |
1201 | for fn in self.rqexe.failed_fns: | 1249 | for tid in self.rqexe.failed_tids: |
1202 | self.rqdata.taskData.fail_fn(fn) | 1250 | (mc, fn, tn) = split_tid(tid) |
1251 | self.rqdata.taskData[mc].fail_fn(fn) | ||
1203 | self.rqdata.reset() | 1252 | self.rqdata.reset() |
1204 | 1253 | ||
1205 | if self.state is runQueueComplete: | 1254 | if self.state is runQueueComplete: |
@@ -1246,13 +1295,14 @@ class RunQueue: | |||
1246 | def dump_signatures(self, options): | 1295 | def dump_signatures(self, options): |
1247 | done = set() | 1296 | done = set() |
1248 | bb.note("Reparsing files to collect dependency data") | 1297 | bb.note("Reparsing files to collect dependency data") |
1298 | bb_cache = bb.cache.NoCache(self.cooker.databuilder) | ||
1249 | for tid in self.rqdata.runtaskentries: | 1299 | for tid in self.rqdata.runtaskentries: |
1250 | fn = fn_from_tid(tid) | 1300 | fn = taskfn_fromtid(tid) |
1251 | if fn not in done: | 1301 | if fn not in done: |
1252 | the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data) | 1302 | the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn)) |
1253 | done.add(fn) | 1303 | done.add(fn) |
1254 | 1304 | ||
1255 | bb.parse.siggen.dump_sigs(self.rqdata.dataCache, options) | 1305 | bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options) |
1256 | 1306 | ||
1257 | return | 1307 | return |
1258 | 1308 | ||
@@ -1269,16 +1319,16 @@ class RunQueue: | |||
1269 | valid_new = set() | 1319 | valid_new = set() |
1270 | 1320 | ||
1271 | for tid in self.rqdata.runtaskentries: | 1321 | for tid in self.rqdata.runtaskentries: |
1272 | fn = fn_from_tid(tid) | 1322 | (mc, fn, taskname) = split_tid(tid) |
1273 | taskname = taskname_from_tid(tid) | 1323 | taskfn = taskfn_fromtid(tid) |
1274 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1324 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1275 | 1325 | ||
1276 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1326 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1277 | noexec.append(tid) | 1327 | noexec.append(tid) |
1278 | continue | 1328 | continue |
1279 | 1329 | ||
1280 | sq_fn.append(fn) | 1330 | sq_fn.append(fn) |
1281 | sq_hashfn.append(self.rqdata.dataCache.hashfn[fn]) | 1331 | sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn]) |
1282 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) | 1332 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) |
1283 | sq_taskname.append(taskname) | 1333 | sq_taskname.append(taskname) |
1284 | sq_task.append(tid) | 1334 | sq_task.append(tid) |
@@ -1358,9 +1408,8 @@ class RunQueue: | |||
1358 | 1408 | ||
1359 | 1409 | ||
1360 | for tid in invalidtasks: | 1410 | for tid in invalidtasks: |
1361 | fn = fn_from_tid(tid) | 1411 | (mc, fn, taskname) = split_tid(tid) |
1362 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1412 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
1363 | taskname = taskname_from_tid(tid) | ||
1364 | h = self.rqdata.runtaskentries[tid].hash | 1413 | h = self.rqdata.runtaskentries[tid].hash |
1365 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) | 1414 | matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) |
1366 | match = None | 1415 | match = None |
@@ -1393,7 +1442,7 @@ class RunQueueExecute: | |||
1393 | 1442 | ||
1394 | self.build_stamps = {} | 1443 | self.build_stamps = {} |
1395 | self.build_stamps2 = [] | 1444 | self.build_stamps2 = [] |
1396 | self.failed_fns = [] | 1445 | self.failed_tids = [] |
1397 | 1446 | ||
1398 | self.stampcache = {} | 1447 | self.stampcache = {} |
1399 | 1448 | ||
@@ -1434,7 +1483,7 @@ class RunQueueExecute: | |||
1434 | # worker must have died? | 1483 | # worker must have died? |
1435 | pass | 1484 | pass |
1436 | 1485 | ||
1437 | if len(self.failed_fns) != 0: | 1486 | if len(self.failed_tids) != 0: |
1438 | self.rq.state = runQueueFailed | 1487 | self.rq.state = runQueueFailed |
1439 | return | 1488 | return |
1440 | 1489 | ||
@@ -1449,7 +1498,7 @@ class RunQueueExecute: | |||
1449 | self.rq.read_workers() | 1498 | self.rq.read_workers() |
1450 | return self.rq.active_fds() | 1499 | return self.rq.active_fds() |
1451 | 1500 | ||
1452 | if len(self.failed_fns) != 0: | 1501 | if len(self.failed_tids) != 0: |
1453 | self.rq.state = runQueueFailed | 1502 | self.rq.state = runQueueFailed |
1454 | return True | 1503 | return True |
1455 | 1504 | ||
@@ -1463,9 +1512,8 @@ class RunQueueExecute: | |||
1463 | taskdata = {} | 1512 | taskdata = {} |
1464 | taskdeps.add(task) | 1513 | taskdeps.add(task) |
1465 | for dep in taskdeps: | 1514 | for dep in taskdeps: |
1466 | fn = fn_from_tid(dep) | 1515 | (mc, fn, taskname) = split_tid(dep) |
1467 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1516 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
1468 | taskname = taskname_from_tid(dep) | ||
1469 | taskdata[dep] = [pn, taskname, fn] | 1517 | taskdata[dep] = [pn, taskname, fn] |
1470 | call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" | 1518 | call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" |
1471 | locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data } | 1519 | locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data } |
@@ -1519,10 +1567,10 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1519 | tasknames = {} | 1567 | tasknames = {} |
1520 | fns = {} | 1568 | fns = {} |
1521 | for tid in self.rqdata.runtaskentries: | 1569 | for tid in self.rqdata.runtaskentries: |
1522 | fn = fn_from_tid(tid) | 1570 | (mc, fn, taskname) = split_tid(tid) |
1523 | taskname = taskname_from_tid(tid) | 1571 | taskfn = taskfn_fromtid(tid) |
1524 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1572 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1525 | fns[tid] = fn | 1573 | fns[tid] = taskfn |
1526 | tasknames[tid] = taskname | 1574 | tasknames[tid] = taskname |
1527 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1575 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1528 | continue | 1576 | continue |
@@ -1539,9 +1587,10 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1539 | covered_remove = bb.utils.better_eval(call, locs) | 1587 | covered_remove = bb.utils.better_eval(call, locs) |
1540 | 1588 | ||
1541 | def removecoveredtask(tid): | 1589 | def removecoveredtask(tid): |
1542 | fn = fn_from_tid(tid) | 1590 | (mc, fn, taskname) = split_tid(tid) |
1543 | taskname = taskname_from_tid(tid) + '_setscene' | 1591 | taskname = taskname + '_setscene' |
1544 | bb.build.del_stamp(taskname, self.rqdata.dataCache, fn) | 1592 | taskfn = taskfn_fromtid(tid) |
1593 | bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) | ||
1545 | self.rq.scenequeue_covered.remove(tid) | 1594 | self.rq.scenequeue_covered.remove(tid) |
1546 | 1595 | ||
1547 | toremove = covered_remove | 1596 | toremove = covered_remove |
@@ -1562,7 +1611,15 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1562 | 1611 | ||
1563 | logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered) | 1612 | logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered) |
1564 | 1613 | ||
1565 | event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData) | 1614 | |
1615 | for mc in self.rqdata.dataCaches: | ||
1616 | target_pairs = [] | ||
1617 | for tid in self.rqdata.target_tids: | ||
1618 | (tidmc, fn, taskname) = split_tid(tid) | ||
1619 | if tidmc == mc: | ||
1620 | target_pairs.append((fn, taskname)) | ||
1621 | |||
1622 | event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData) | ||
1566 | 1623 | ||
1567 | schedulers = self.get_schedulers() | 1624 | schedulers = self.get_schedulers() |
1568 | for scheduler in schedulers: | 1625 | for scheduler in schedulers: |
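
The StampUpdate event is now fired once per multiconfig, with target_tids filtered back into the old (fn, taskname) pairs for that configuration. The filtering step on its own, with the tid layout of the helpers added at the top of the file; the example tids are invented.

    def target_pairs_for_mc(target_tids, mc):
        # Rebuild the old-style (fn, taskname) pairs for one multiconfig.
        pairs = []
        for tid in target_tids:
            if tid.startswith('multiconfig:'):
                _, tidmc, rest = tid.split(':', 2)
                fn, taskname = rest.rsplit(':', 1)
            else:
                tidmc = ''
                fn, taskname = tid.rsplit(':', 1)
            if tidmc == mc:
                pairs.append((fn, taskname))
        return pairs

    print(target_pairs_for_mc(
        ["/meta/foo_1.0.bb:do_build", "multiconfig:musl:/meta/bar_2.0.bb:do_build"],
        "musl"))   # [('/meta/bar_2.0.bb', 'do_build')]
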
@@ -1633,10 +1690,9 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1633 | Updates the state engine with the failure | 1690 | Updates the state engine with the failure |
1634 | """ | 1691 | """ |
1635 | self.stats.taskFailed() | 1692 | self.stats.taskFailed() |
1636 | fn = fn_from_tid(task) | 1693 | self.failed_tids.append(task) |
1637 | self.failed_fns.append(fn) | ||
1638 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData) | 1694 | bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData) |
1639 | if self.rqdata.taskData.abort: | 1695 | if self.rqdata.taskData[''].abort: |
1640 | self.rq.state = runQueueCleanUp | 1696 | self.rq.state = runQueueCleanUp |
1641 | 1697 | ||
1642 | def task_skip(self, task, reason): | 1698 | def task_skip(self, task, reason): |
@@ -1655,8 +1711,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1655 | if self.rqdata.setscenewhitelist: | 1711 | if self.rqdata.setscenewhitelist: |
1656 | # Check tasks that are going to run against the whitelist | 1712 | # Check tasks that are going to run against the whitelist |
1657 | def check_norun_task(tid, showerror=False): | 1713 | def check_norun_task(tid, showerror=False): |
1658 | fn = fn_from_tid(tid) | 1714 | (mc, fn, taskname) = split_tid(tid) |
1659 | taskname = taskname_from_tid(tid) | ||
1660 | # Ignore covered tasks | 1715 | # Ignore covered tasks |
1661 | if tid in self.rq.scenequeue_covered: | 1716 | if tid in self.rq.scenequeue_covered: |
1662 | return False | 1717 | return False |
@@ -1664,11 +1719,11 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1664 | if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): | 1719 | if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): |
1665 | return False | 1720 | return False |
1666 | # Ignore noexec tasks | 1721 | # Ignore noexec tasks |
1667 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1722 | taskdep = self.rqdata.dataCaches[mc].task_deps[fn] |
1668 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1723 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1669 | return False | 1724 | return False |
1670 | 1725 | ||
1671 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1726 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
1672 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 1727 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): |
1673 | if showerror: | 1728 | if showerror: |
1674 | if tid in self.rqdata.runq_setscene_tids: | 1729 | if tid in self.rqdata.runq_setscene_tids: |
@@ -1704,8 +1759,8 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1704 | 1759 | ||
1705 | task = self.sched.next() | 1760 | task = self.sched.next() |
1706 | if task is not None: | 1761 | if task is not None: |
1707 | fn = fn_from_tid(task) | 1762 | (mc, fn, taskname) = split_tid(task) |
1708 | taskname = taskname_from_tid(task) | 1763 | taskfn = taskfn_fromtid(task) |
1709 | 1764 | ||
1710 | if task in self.rq.scenequeue_covered: | 1765 | if task in self.rq.scenequeue_covered: |
1711 | logger.debug(2, "Setscene covered task %s", task) | 1766 | logger.debug(2, "Setscene covered task %s", task) |
@@ -1718,7 +1773,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1718 | self.task_skip(task, "existing") | 1773 | self.task_skip(task, "existing") |
1719 | return True | 1774 | return True |
1720 | 1775 | ||
1721 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1776 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1722 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 1777 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
1723 | startevent = runQueueTaskStarted(task, self.stats, self.rq, | 1778 | startevent = runQueueTaskStarted(task, self.stats, self.rq, |
1724 | noexec=True) | 1779 | noexec=True) |
@@ -1726,7 +1781,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1726 | self.runq_running.add(task) | 1781 | self.runq_running.add(task) |
1727 | self.stats.taskActive() | 1782 | self.stats.taskActive() |
1728 | if not self.cooker.configuration.dry_run: | 1783 | if not self.cooker.configuration.dry_run: |
1729 | bb.build.make_stamp(taskname, self.rqdata.dataCache, fn) | 1784 | bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn) |
1730 | self.task_complete(task) | 1785 | self.task_complete(task) |
1731 | return True | 1786 | return True |
1732 | else: | 1787 | else: |
@@ -1735,7 +1790,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1735 | 1790 | ||
1736 | taskdepdata = self.build_taskdepdata(task) | 1791 | taskdepdata = self.build_taskdepdata(task) |
1737 | 1792 | ||
1738 | taskdep = self.rqdata.dataCache.task_deps[fn] | 1793 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
1739 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: | 1794 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: |
1740 | if not self.rq.fakeworker: | 1795 | if not self.rq.fakeworker: |
1741 | try: | 1796 | try: |
@@ -1744,13 +1799,13 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1744 | logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) | 1799 | logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) |
1745 | self.rq.state = runQueueFailed | 1800 | self.rq.state = runQueueFailed |
1746 | return True | 1801 | return True |
1747 | self.rq.fakeworker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>") | 1802 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>") |
1748 | self.rq.fakeworker[''].process.stdin.flush() | 1803 | self.rq.fakeworker[mc].process.stdin.flush() |
1749 | else: | 1804 | else: |
1750 | self.rq.worker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>") | 1805 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>") |
1751 | self.rq.worker[''].process.stdin.flush() | 1806 | self.rq.worker[mc].process.stdin.flush() |
1752 | 1807 | ||
1753 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn) | 1808 | self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn) |
1754 | self.build_stamps2.append(self.build_stamps[task]) | 1809 | self.build_stamps2.append(self.build_stamps[task]) |
1755 | self.runq_running.add(task) | 1810 | self.runq_running.add(task) |
1756 | self.stats.taskActive() | 1811 | self.stats.taskActive() |
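
Both execution paths above now pick the worker for the task's own multiconfig instead of the single '' worker, and hand it the multiconfig-prefixed taskfn. A schematic of the dispatch step, reusing the split_tid()/taskfn_fromtid() sketches from the first example; the worker objects and the get_file_appends() callback are stand-ins.

    import pickle

    def dispatch_runtask(task, workers, fakeworkers, fakeroot, get_file_appends,
                         taskdepdata=None):
        # Pick the per-multiconfig worker and send it the prefixed taskfn, as in
        # the hunk above; the boolean mirrors what that code passes (False here,
        # True on the setscene path).
        mc, fn, taskname = split_tid(task)
        taskfn = taskfn_fromtid(task)
        pool = fakeworkers if fakeroot else workers
        msg = (b"<runtask>"
               + pickle.dumps((taskfn, task, taskname, False,
                               get_file_appends(taskfn), taskdepdata))
               + b"</runtask>")
        pool[mc].process.stdin.write(msg)
        pool[mc].process.stdin.flush()
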
@@ -1761,7 +1816,7 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1761 | self.rq.read_workers() | 1816 | self.rq.read_workers() |
1762 | return self.rq.active_fds() | 1817 | return self.rq.active_fds() |
1763 | 1818 | ||
1764 | if len(self.failed_fns) != 0: | 1819 | if len(self.failed_tids) != 0: |
1765 | self.rq.state = runQueueFailed | 1820 | self.rq.state = runQueueFailed |
1766 | return True | 1821 | return True |
1767 | 1822 | ||
@@ -1784,11 +1839,11 @@ class RunQueueExecuteTasks(RunQueueExecute): | |||
1784 | while next: | 1839 | while next: |
1785 | additional = [] | 1840 | additional = [] |
1786 | for revdep in next: | 1841 | for revdep in next: |
1787 | fn = fn_from_tid(revdep) | 1842 | (mc, fn, taskname) = split_tid(revdep) |
1788 | pn = self.rqdata.dataCache.pkg_fn[fn] | 1843 | taskfn = taskfn_fromtid(revdep) |
1789 | taskname = taskname_from_tid(revdep) | 1844 | pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] |
1790 | deps = self.rqdata.runtaskentries[revdep].depends | 1845 | deps = self.rqdata.runtaskentries[revdep].depends |
1791 | provides = self.rqdata.dataCache.fn_provides[fn] | 1846 | provides = self.rqdata.dataCaches[mc].fn_provides[taskfn] |
1792 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides] | 1847 | taskdepdata[revdep] = [pn, taskname, fn, deps, provides] |
1793 | for revdep2 in deps: | 1848 | for revdep2 in deps: |
1794 | if revdep2 not in taskdepdata: | 1849 | if revdep2 not in taskdepdata: |
@@ -1928,14 +1983,15 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
1928 | # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" | 1983 | # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" |
1929 | # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies | 1984 | # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies |
1930 | for tid in self.rqdata.runq_setscene_tids: | 1985 | for tid in self.rqdata.runq_setscene_tids: |
1931 | realtid = tid + "_setscene" | 1986 | (mc, fn, taskname) = split_tid(tid) |
1932 | idepends = self.rqdata.taskData.taskentries[realtid].idepends | 1987 | realtid = fn + ":" + taskname + "_setscene" |
1988 | idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends | ||
1933 | for (depname, idependtask) in idepends: | 1989 | for (depname, idependtask) in idepends: |
1934 | 1990 | ||
1935 | if depname not in self.rqdata.taskData.build_targets: | 1991 | if depname not in self.rqdata.taskData[mc].build_targets: |
1936 | continue | 1992 | continue |
1937 | 1993 | ||
1938 | depfn = self.rqdata.taskData.build_targets[depname][0] | 1994 | depfn = self.rqdata.taskData[mc].build_targets[depname][0] |
1939 | if depfn is None: | 1995 | if depfn is None: |
1940 | continue | 1996 | continue |
1941 | deptid = depfn + ":" + idependtask.replace("_setscene", "") | 1997 | deptid = depfn + ":" + idependtask.replace("_setscene", "") |
@@ -1991,15 +2047,15 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
1991 | noexec = [] | 2047 | noexec = [] |
1992 | stamppresent = [] | 2048 | stamppresent = [] |
1993 | for tid in self.sq_revdeps: | 2049 | for tid in self.sq_revdeps: |
1994 | fn = fn_from_tid(tid) | 2050 | (mc, fn, taskname) = split_tid(tid) |
1995 | taskname = taskname_from_tid(tid) | 2051 | taskfn = taskfn_fromtid(tid) |
1996 | 2052 | ||
1997 | taskdep = self.rqdata.dataCache.task_deps[fn] | 2053 | taskdep = self.rqdata.dataCaches[mc].task_deps[fn] |
1998 | 2054 | ||
1999 | if 'noexec' in taskdep and taskname in taskdep['noexec']: | 2055 | if 'noexec' in taskdep and taskname in taskdep['noexec']: |
2000 | noexec.append(tid) | 2056 | noexec.append(tid) |
2001 | self.task_skip(tid) | 2057 | self.task_skip(tid) |
2002 | bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn) | 2058 | bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn) |
2003 | continue | 2059 | continue |
2004 | 2060 | ||
2005 | if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache): | 2061 | if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache): |
@@ -2015,7 +2071,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2015 | continue | 2071 | continue |
2016 | 2072 | ||
2017 | sq_fn.append(fn) | 2073 | sq_fn.append(fn) |
2018 | sq_hashfn.append(self.rqdata.dataCache.hashfn[fn]) | 2074 | sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn]) |
2019 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) | 2075 | sq_hash.append(self.rqdata.runtaskentries[tid].hash) |
2020 | sq_taskname.append(taskname) | 2076 | sq_taskname.append(taskname) |
2021 | sq_task.append(tid) | 2077 | sq_task.append(tid) |
@@ -2063,9 +2119,8 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2063 | def check_taskfail(self, task): | 2119 | def check_taskfail(self, task): |
2064 | if self.rqdata.setscenewhitelist: | 2120 | if self.rqdata.setscenewhitelist: |
2065 | realtask = task.split('_setscene')[0] | 2121 | realtask = task.split('_setscene')[0] |
2066 | fn = fn_from_tid(realtask) | 2122 | (mc, fn, taskname) = split_tid(realtask) |
2067 | taskname = taskname_from_tid(realtask) | 2123 | pn = self.rqdata.dataCaches[mc].pkg_fn[fn] |
2068 | pn = self.rqdata.dataCache.pkg_fn[fn] | ||
2069 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): | 2124 | if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): |
2070 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) | 2125 | logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) |
2071 | self.rq.state = runQueueCleanUp | 2126 | self.rq.state = runQueueCleanUp |
@@ -2114,10 +2169,9 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2114 | if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True): | 2169 | if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True): |
2115 | fn = fn_from_tid(nexttask) | 2170 | fn = fn_from_tid(nexttask) |
2116 | foundtarget = False | 2171 | foundtarget = False |
2117 | for target in self.rqdata.target_pairs: | 2172 | |
2118 | if target[0] == fn and target[1] == taskname_from_tid(nexttask): | 2173 | if nexttask in self.rqdata.target_tids: |
2119 | foundtarget = True | 2174 | foundtarget = True |
2120 | break | ||
2121 | if not foundtarget: | 2175 | if not foundtarget: |
2122 | logger.debug(2, "Skipping setscene for task %s" % nexttask) | 2176 | logger.debug(2, "Skipping setscene for task %s" % nexttask) |
2123 | self.task_skip(nexttask) | 2177 | self.task_skip(nexttask) |
@@ -2129,18 +2183,18 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2129 | task = nexttask | 2183 | task = nexttask |
2130 | break | 2184 | break |
2131 | if task is not None: | 2185 | if task is not None: |
2132 | fn = fn_from_tid(task) | 2186 | (mc, fn, taskname) = split_tid(task) |
2133 | taskname = taskname_from_tid(task) + "_setscene" | 2187 | taskfn = taskfn_fromtid(task) |
2188 | taskname = taskname + "_setscene" | ||
2134 | if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache): | 2189 | if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache): |
2135 | logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task) | 2190 | logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task) |
2136 | self.task_failoutright(task) | 2191 | self.task_failoutright(task) |
2137 | return True | 2192 | return True |
2138 | 2193 | ||
2139 | if self.cooker.configuration.force: | 2194 | if self.cooker.configuration.force: |
2140 | for target in self.rqdata.target_pairs: | 2195 | if task in self.rqdata.target_tids: |
2141 | if target[0] == fn and target[1] == taskname_from_tid(task): | 2196 | self.task_failoutright(task) |
2142 | self.task_failoutright(task) | 2197 | return True |
2143 | return True | ||
2144 | 2198 | ||
2145 | if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): | 2199 | if self.rq.check_stamp_task(task, taskname, cache=self.stampcache): |
2146 | logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task) | 2200 | logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task) |
@@ -2150,15 +2204,15 @@ class RunQueueExecuteScenequeue(RunQueueExecute): | |||
2150 | startevent = sceneQueueTaskStarted(task, self.stats, self.rq) | 2204 | startevent = sceneQueueTaskStarted(task, self.stats, self.rq) |
2151 | bb.event.fire(startevent, self.cfgData) | 2205 | bb.event.fire(startevent, self.cfgData) |
2152 | 2206 | ||
2153 | taskdep = self.rqdata.dataCache.task_deps[fn] | 2207 | taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] |
2154 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: | 2208 | if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: |
2155 | if not self.rq.fakeworker: | 2209 | if not self.rq.fakeworker: |
2156 | self.rq.start_fakeworker(self) | 2210 | self.rq.start_fakeworker(self) |
2157 | self.rq.fakeworker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>") | 2211 | self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>") |
2158 | self.rq.fakeworker[''].process.stdin.flush() | 2212 | self.rq.fakeworker[mc].process.stdin.flush() |
2159 | else: | 2213 | else: |
2160 | self.rq.worker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>") | 2214 | self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>") |
2161 | self.rq.worker[''].process.stdin.flush() | 2215 | self.rq.worker[mc].process.stdin.flush() |
2162 | 2216 | ||
2163 | self.runq_running.add(task) | 2217 | self.runq_running.add(task) |
2164 | self.stats.taskActive() | 2218 | self.stats.taskActive() |